From 41eaf86af7f6a8db76ab1f9a9212178d26d04fce Mon Sep 17 00:00:00 2001
From: William Astorga
Date: Thu, 12 Oct 2023 23:36:13 -0600
Subject: [PATCH 1/6] fix: update builder_version from 4.1.0 to 4.1.3

---
 Splunk-TA-cisco-dnacenter/app.manifest | 2 +-
 .../static/js/build/globalConfig.json | 8 +
 .../Splunk_TA_cisco_dnacenter_rh_account.py | 0
 ...co_dnacenter_rh_cisco_dnac_clienthealth.py | 0
 ...isco_dnacenter_rh_cisco_dnac_compliance.py | 0
 ...co_dnacenter_rh_cisco_dnac_devicehealth.py | 0
 ..._TA_cisco_dnacenter_rh_cisco_dnac_issue.py | 0
 ...o_dnacenter_rh_cisco_dnac_networkhealth.py | 0
 ...nacenter_rh_cisco_dnac_securityadvisory.py | 0
 .../Splunk_TA_cisco_dnacenter_rh_settings.py | 0
 .../bin/cisco_dnac_clienthealth.py | 0
 .../bin/cisco_dnac_compliance.py | 0
 .../bin/cisco_dnac_devicehealth.py | 0
 .../bin/cisco_dnac_issue.py | 0
 .../bin/cisco_dnac_networkhealth.py | 0
 .../bin/cisco_dnac_securityadvisory.py | 0
 .../input_module_cisco_dnac_clienthealth.py | 0
 .../bin/input_module_cisco_dnac_compliance.py | 0
 .../input_module_cisco_dnac_devicehealth.py | 0
 .../bin/input_module_cisco_dnac_issue.py | 0
 .../input_module_cisco_dnac_networkhealth.py | 0
 ...nput_module_cisco_dnac_securityadvisory.py | 0
 .../aob_py3/Mako-1.1.0.dist-info/AUTHORS | 13 -
 .../aob_py3/Mako-1.1.0.dist-info/RECORD | 36 -
 .../Mako-1.1.0.dist-info/entry_points.txt | 20 -
 .../INSTALLER | 0
 .../LICENSE | 4 +-
 .../METADATA | 23 +-
 .../aob_py3/Mako-1.2.4.dist-info/RECORD | 42 +
 .../REQUESTED | 0
 .../WHEEL | 2 +-
 .../Mako-1.2.4.dist-info/entry_points.txt | 18 +
 .../top_level.txt | 0
 .../aob_py3/MarkupSafe-2.0.1.dist-info/RECORD | 1 +
 .../REQUESTED | 0
 .../aob_py3/_distutils_hack/__init__.py | 220 -
 .../aob_py3/_distutils_hack/override.py | 1 -
 .../aob_py3/_pyrsistent_version.py | 2 +-
 .../aob_py3/attr/__init__.py | 96 +-
 .../aob_py3/attr/__init__.pyi | 125 +-
 .../aob_py3/attr/_cmp.py | 45 +-
 .../aob_py3/attr/_cmp.pyi | 18 +-
 .../aob_py3/attr/_compat.py | 236 +-
 .../aob_py3/attr/_config.py | 2 -
 .../aob_py3/attr/_funcs.py | 139 +-
 .../aob_py3/attr/_make.py | 922 +--
 .../aob_py3/attr/_next_gen.py | 32 +-
 .../aob_py3/attr/_typing_compat.pyi | 15 +
 .../aob_py3/attr/_version_info.py | 3 +-
 .../aob_py3/attr/converters.py | 37 +-
 .../aob_py3/attr/converters.pyi | 2 +-
 .../aob_py3/attr/exceptions.py | 17 +-
 .../aob_py3/attr/filters.py | 32 +-
 .../aob_py3/attr/filters.pyi | 4 +-
 .../aob_py3/attr/setters.py | 10 +-
 .../aob_py3/attr/setters.pyi | 2 +-
 .../aob_py3/attr/validators.py | 241 +-
 .../aob_py3/attr/validators.pyi | 14 +-
 .../attrs-21.4.0.dist-info/AUTHORS.rst | 11 -
 .../aob_py3/attrs-21.4.0.dist-info/METADATA | 232 -
 .../aob_py3/attrs-21.4.0.dist-info/RECORD | 37 -
 .../attrs-21.4.0.dist-info/top_level.txt | 2 -
 .../INSTALLER | 0
 .../aob_py3/attrs-23.1.0.dist-info/METADATA | 243 +
 .../aob_py3/attrs-23.1.0.dist-info/RECORD | 36 +
 .../WHEEL | 2 +-
 .../licenses}/LICENSE | 2 +-
 .../aob_py3/attrs/__init__.py | 15 +-
 .../aob_py3/attrs/__init__.pyi | 10 +-
 .../aob_py3/attrs/converters.py | 0
 .../aob_py3/attrs/exceptions.py | 0
 .../aob_py3/attrs/filters.py | 0
 .../aob_py3/attrs/setters.py | 0
 .../aob_py3/attrs/validators.py | 0
 .../aob_py3/bin/futurize | 8 -
 .../aob_py3/bin/json | 0
 .../aob_py3/bin/jsonpath_ng | 0
 .../aob_py3/bin/jsonschema | 0
 .../aob_py3/bin/mako-render | 0
 .../aob_py3/bin/normalizer | 0
 .../aob_py3/bin/pasteurize | 8 -
 .../certifi-2021.10.8.dist-info/RECORD | 10 -
 .../aob_py3/certifi-2021.10.8.dist-info/WHEEL | 6 -
 .../INSTALLER | 0
 .../LICENSE | 2 +-
 .../METADATA | 16 +-
 .../certifi-2022.12.7.dist-info/RECORD | 11 +
 .../WHEEL | 0
 .../top_level.txt | 0
 .../aob_py3/certifi/__init__.py | 3 +-
 .../aob_py3/certifi/__main__.py | 0
 .../aob_py3/certifi/cacert.pem | 1129 +--
 .../aob_py3/certifi/core.py | 74 +-
 .../mime/__init__.py => certifi/py.typed} | 0
 .../INSTALLER | 0
 .../LICENSE | 0
 .../METADATA | 2 +-
 .../RECORD | 18 +-
 .../WHEEL | 0
 .../entry_points.txt | 0
 .../top_level.txt | 0
 .../aob_py3/charset_normalizer/__init__.py | 0
 .../aob_py3/charset_normalizer/api.py | 0
 .../charset_normalizer/assets/__init__.py | 0
 .../aob_py3/charset_normalizer/cd.py | 0
 .../charset_normalizer/cli/__init__.py | 0
 .../charset_normalizer/cli/normalizer.py | 0
 .../aob_py3/charset_normalizer/constant.py | 0
 .../aob_py3/charset_normalizer/legacy.py | 0
 .../aob_py3/charset_normalizer/md.py | 2 +-
 .../aob_py3/charset_normalizer/models.py | 0
 .../aob_py3/charset_normalizer/utils.py | 0
 .../aob_py3/charset_normalizer/version.py | 2 +-
 .../cloudconnectlib-3.1.0b3.dist-info/RECORD | 47 -
 .../INSTALLER | 0
 .../LICENSE | 0
 .../METADATA | 18 +-
 .../cloudconnectlib-3.1.3.dist-info/RECORD | 47 +
 .../REQUESTED | 0
 .../cloudconnectlib-3.1.3.dist-info/WHEEL | 4 +
 .../aob_py3/cloudconnectlib/__init__.py | 9 +-
 .../aob_py3/cloudconnectlib/client.py | 24 +-
 .../cloudconnectlib/common/lib_util.py | 19 +-
 .../aob_py3/cloudconnectlib/common/log.py | 18 +-
 .../aob_py3/cloudconnectlib/common/util.py | 44 +-
 .../cloudconnectlib/configuration/loader.py | 214 +-
 .../configuration/schema_1_0_0.json | 0
 .../core/cacerts/ca_certs_locater.py | 145 -
 .../cloudconnectlib/core/checkpoint.py | 13 +-
 .../aob_py3/cloudconnectlib/core/defaults.py | 25 +-
 .../aob_py3/cloudconnectlib/core/engine.py | 120 +-
 .../aob_py3/cloudconnectlib/core/engine_v2.py | 31 +-
 .../cloudconnectlib/core/exceptions.py | 8 +-
 .../aob_py3/cloudconnectlib/core/ext.py | 210 +-
 .../aob_py3/cloudconnectlib/core/http.py | 266 +-
 .../aob_py3/cloudconnectlib/core/job.py | 60 +-
 .../aob_py3/cloudconnectlib/core/models.py | 91 +-
 .../aob_py3/cloudconnectlib/core/pipemgr.py | 7 +-
 .../aob_py3/cloudconnectlib/core/plugin.py | 51 +-
 .../aob_py3/cloudconnectlib/core/task.py | 232 +-
 .../aob_py3/cloudconnectlib/core/template.py | 5 +-
 .../cloud_connect_mod_input.py | 31 +-
 .../splunktacollectorlib/common/__init__.py | 3 +-
 .../splunktacollectorlib/common/log.py | 44 +-
 .../splunktacollectorlib/common/rwlock.py | 10 +-
 .../common/schema_meta.py | 12 +-
 .../splunktacollectorlib/config.py | 329 +-
 .../data_collection/__init__.py | 2 +-
 .../data_collection/ta_checkpoint_manager.py | 81 +-
 .../data_collection/ta_config.py | 88 +-
 .../data_collection/ta_consts.py | 8 +-
 .../data_collection/ta_data_client.py | 65 +-
 .../data_collection/ta_data_collector.py | 142 +-
 .../data_collection/ta_data_loader.py | 48 +-
 .../data_collection/ta_helper.py | 84 +-
 .../data_collection/ta_mod_input.py | 145 +-
 .../splunk_ta_import_declare.py | 6 +-
 .../ta_cloud_connect_client.py | 30 +-
 .../aob_py3/decorator.py | 0
 .../aob_py3/defusedxml/ElementTree.py | 0
 .../aob_py3/defusedxml/__init__.py | 0
 .../aob_py3/defusedxml/cElementTree.py | 0
 .../aob_py3/defusedxml/common.py | 0
 .../aob_py3/defusedxml/expatbuilder.py | 0
 .../aob_py3/defusedxml/expatreader.py | 0
 .../aob_py3/defusedxml/lxml.py | 0
 .../aob_py3/defusedxml/minidom.py | 0
 .../aob_py3/defusedxml/pulldom.py | 0
 .../aob_py3/defusedxml/sax.py | 0
 .../aob_py3/defusedxml/xmlrpc.py | 0
 .../aob_py3/distutils-precedence.pth | 1 -
 .../future-0.18.2.dist-info/LICENSE.txt | 19 -
 .../aob_py3/future-0.18.2.dist-info/METADATA | 110 -
 .../aob_py3/future-0.18.2.dist-info/RECORD | 219 -
 .../future-0.18.2.dist-info/entry_points.txt | 4 -
 .../future-0.18.2.dist-info/top_level.txt | 4 -
 .../aob_py3/future/__init__.py | 93 -
 .../aob_py3/future/backports/__init__.py | 26 -
 .../aob_py3/future/backports/_markupbase.py | 422 -
 .../aob_py3/future/backports/datetime.py | 2152 -----
 .../future/backports/email/__init__.py | 78 -
 .../future/backports/email/_encoded_words.py | 232 -
 .../backports/email/_header_value_parser.py | 2965 -------
 .../future/backports/email/_parseaddr.py | 546 --
 .../future/backports/email/_policybase.py | 365 -
 .../future/backports/email/base64mime.py | 120 -
 .../aob_py3/future/backports/email/charset.py | 409 -
 .../future/backports/email/encoders.py | 90 -
 .../aob_py3/future/backports/email/errors.py | 111 -
 .../future/backports/email/feedparser.py | 525 --
 .../future/backports/email/generator.py | 498 --
 .../aob_py3/future/backports/email/header.py | 581 --
 .../future/backports/email/headerregistry.py | 592 --
 .../future/backports/email/iterators.py | 74 -
 .../aob_py3/future/backports/email/message.py | 882 --
 .../backports/email/mime/application.py | 39 -
 .../future/backports/email/mime/audio.py | 74 -
 .../future/backports/email/mime/base.py | 25 -
 .../future/backports/email/mime/image.py | 48 -
 .../future/backports/email/mime/message.py | 36 -
 .../future/backports/email/mime/multipart.py | 49 -
 .../backports/email/mime/nonmultipart.py | 24 -
 .../future/backports/email/mime/text.py | 44 -
 .../aob_py3/future/backports/email/parser.py | 135 -
 .../aob_py3/future/backports/email/policy.py | 193 -
 .../future/backports/email/quoprimime.py | 326 -
 .../aob_py3/future/backports/email/utils.py | 400 -
 .../aob_py3/future/backports/html/__init__.py | 27 -
 .../aob_py3/future/backports/html/entities.py | 2514 ------
 .../aob_py3/future/backports/html/parser.py | 536 --
 .../aob_py3/future/backports/http/client.py | 1346 ----
 .../future/backports/http/cookiejar.py | 2110 -----
 .../aob_py3/future/backports/http/cookies.py | 598 --
 .../aob_py3/future/backports/http/server.py | 1226 ---
 .../aob_py3/future/backports/misc.py | 944 ---
 .../aob_py3/future/backports/socket.py | 454 --
 .../aob_py3/future/backports/socketserver.py | 747 --
 .../future/backports/total_ordering.py | 38 -
 .../aob_py3/future/backports/urllib/error.py | 75 -
 .../aob_py3/future/backports/urllib/parse.py | 991 ---
 .../future/backports/urllib/request.py | 2647 ------
 .../future/backports/urllib/response.py | 103 -
 .../future/backports/urllib/robotparser.py | 211 -
 .../future/backports/xmlrpc/__init__.py | 1 -
 .../aob_py3/future/backports/xmlrpc/client.py | 1496 ----
 .../aob_py3/future/backports/xmlrpc/server.py | 999 ---
 .../aob_py3/future/builtins/__init__.py | 51 -
 .../aob_py3/future/builtins/disabled.py | 66 -
 .../aob_py3/future/builtins/iterators.py | 52 -
 .../aob_py3/future/builtins/misc.py | 135 -
 .../aob_py3/future/builtins/new_min_max.py | 59 -
 .../aob_py3/future/builtins/newnext.py | 70 -
 .../aob_py3/future/builtins/newround.py | 102 -
 .../aob_py3/future/builtins/newsuper.py | 114 -
 .../aob_py3/future/moves/__init__.py | 8 -
 .../aob_py3/future/moves/_dummy_thread.py | 8 -
 .../aob_py3/future/moves/_markupbase.py | 8 -
 .../aob_py3/future/moves/_thread.py | 8 -
 .../aob_py3/future/moves/builtins.py | 10 -
 .../aob_py3/future/moves/collections.py | 18 -
 .../aob_py3/future/moves/configparser.py | 8 -
 .../aob_py3/future/moves/copyreg.py | 12 -
 .../aob_py3/future/moves/dbm/__init__.py | 20 -
 .../aob_py3/future/moves/dbm/dumb.py | 9 -
 .../aob_py3/future/moves/dbm/gnu.py | 9 -
 .../aob_py3/future/moves/dbm/ndbm.py | 9 -
 .../aob_py3/future/moves/html/__init__.py | 31 -
 .../aob_py3/future/moves/html/entities.py | 8 -
 .../aob_py3/future/moves/html/parser.py | 8 -
 .../aob_py3/future/moves/http/__init__.py | 4 -
 .../aob_py3/future/moves/http/client.py | 8 -
 .../aob_py3/future/moves/http/cookiejar.py | 8 -
 .../aob_py3/future/moves/http/cookies.py | 9 -
 .../aob_py3/future/moves/http/server.py | 20 -
 .../aob_py3/future/moves/itertools.py | 8 -
 .../aob_py3/future/moves/pickle.py | 11 -
 .../aob_py3/future/moves/queue.py | 8 -
 .../aob_py3/future/moves/reprlib.py | 8 -
 .../aob_py3/future/moves/socketserver.py | 8 -
 .../aob_py3/future/moves/subprocess.py | 11 -
 .../aob_py3/future/moves/sys.py | 8 -
 .../aob_py3/future/moves/tkinter/__init__.py | 27 -
 .../future/moves/tkinter/colorchooser.py | 12 -
 .../future/moves/tkinter/commondialog.py | 12 -
 .../aob_py3/future/moves/tkinter/constants.py | 12 -
 .../aob_py3/future/moves/tkinter/dialog.py | 12 -
 .../aob_py3/future/moves/tkinter/dnd.py | 12 -
 .../future/moves/tkinter/filedialog.py | 12 -
 .../aob_py3/future/moves/tkinter/font.py | 12 -
 .../future/moves/tkinter/messagebox.py | 12 -
 .../future/moves/tkinter/scrolledtext.py | 12 -
 .../future/moves/tkinter/simpledialog.py | 12 -
 .../aob_py3/future/moves/tkinter/tix.py | 12 -
 .../aob_py3/future/moves/tkinter/ttk.py | 12 -
 .../aob_py3/future/moves/urllib/__init__.py | 5 -
 .../aob_py3/future/moves/urllib/error.py | 16 -
 .../aob_py3/future/moves/urllib/parse.py | 28 -
 .../aob_py3/future/moves/urllib/request.py | 94 -
 .../aob_py3/future/moves/urllib/response.py | 12 -
 .../future/moves/urllib/robotparser.py | 8 -
 .../aob_py3/future/moves/winreg.py | 8 -
 .../aob_py3/future/moves/xmlrpc/client.py | 7 -
 .../aob_py3/future/moves/xmlrpc/server.py | 7 -
 .../future/standard_library/__init__.py | 815 --
 .../aob_py3/future/tests/base.py | 539 --
 .../aob_py3/future/types/__init__.py | 257 -
 .../aob_py3/future/types/newbytes.py | 460 --
 .../aob_py3/future/types/newdict.py | 111 -
 .../aob_py3/future/types/newint.py | 381 -
 .../aob_py3/future/types/newlist.py | 95 -
 .../aob_py3/future/types/newmemoryview.py | 29 -
 .../aob_py3/future/types/newobject.py | 117 -
 .../aob_py3/future/types/newopen.py | 32 -
 .../aob_py3/future/types/newrange.py | 170 -
 .../aob_py3/future/types/newstr.py | 426 -
 .../aob_py3/future/utils/__init__.py | 767 --
 .../aob_py3/future/utils/surrogateescape.py | 198 -
 .../aob_py3/httplib2-0.19.1.dist-info/LICENSE | 23 -
 .../httplib2-0.19.1.dist-info/METADATA | 71 -
 .../aob_py3/httplib2-0.19.1.dist-info/RECORD | 13 -
 .../httplib2-0.19.1.dist-info/top_level.txt | 1 -
 .../aob_py3/httplib2/__init__.py | 1783 -----
 .../aob_py3/httplib2/auth.py | 63 -
 .../aob_py3/httplib2/cacerts.txt | 2197 -----
 .../aob_py3/httplib2/certs.py | 42 -
 .../aob_py3/httplib2/error.py | 48 -
 .../aob_py3/httplib2/iri2uri.py | 124 -
 .../aob_py3/httplib2/socks.py | 518 --
 .../aob_py3/idna-3.3.dist-info/RECORD | 15 -
 .../aob_py3/idna-3.3.dist-info/top_level.txt | 1 -
 .../INSTALLER | 0
 .../LICENSE.md | 0
 .../METADATA | 194 +-
 .../aob_py3/idna-3.4.dist-info/RECORD | 14 +
 .../WHEEL | 2 +-
 .../aob_py3/idna/__init__.py | 0
 .../aob_py3/idna/codec.py | 0
 .../aob_py3/idna/compat.py | 0
 .../aob_py3/idna/core.py | 5 +-
 .../aob_py3/idna/idnadata.py | 34 +-
 .../aob_py3/idna/intranges.py | 0
 .../aob_py3/idna/package_data.py | 2 +-
 .../aob_py3/idna/uts46data.py | 306 +-
 .../LICENSE | 13 -
 .../RECORD | 15 -
 .../INSTALLER | 0
 .../LICENSE | 202 +
 .../METADATA | 51 +-
 .../importlib_metadata-6.6.0.dist-info/RECORD | 16 +
 .../WHEEL | 2 +-
 .../top_level.txt | 0
 .../aob_py3/importlib_metadata/__init__.py | 484 +-
 .../aob_py3/importlib_metadata/_adapters.py | 22 +
 .../importlib_metadata/_collections.py | 0
 .../aob_py3/importlib_metadata/_compat.py | 11 +
 .../aob_py3/importlib_metadata/_functools.py | 0
 .../aob_py3/importlib_metadata/_itertools.py | 0
 .../aob_py3/importlib_metadata/_meta.py | 27 +-
 .../aob_py3/importlib_metadata/_py39compat.py | 35 +
 .../aob_py3/importlib_metadata/_text.py | 0
 .../INSTALLER | 0
 .../LICENSE | 202 +
 .../METADATA | 104 +
 .../RECORD | 49 +
 .../WHEEL | 2 +-
 .../top_level.txt | 1 +
 .../importlib_resources/__init__.py | 0
 .../importlib_resources/_adapters.py | 4 +-
 .../aob_py3/importlib_resources/_common.py | 207 +
 .../importlib_resources/_compat.py | 21 +-
 .../aob_py3/importlib_resources/_itertools.py | 38 +
 .../importlib_resources/_legacy.py | 3 +-
 .../_vendor => }/importlib_resources/abc.py | 63 +-
 .../py.typed} | 0
 .../importlib_resources/readers.py | 48 +-
 .../importlib_resources/simple.py | 70 +-
 .../tests}/__init__.py | 0
 .../importlib_resources/tests/_compat.py | 32 +
 .../importlib_resources/tests/_path.py | 56 +
 .../tests/data01}/__init__.py | 0
 .../tests/data01/binary.file | Bin 0 -> 4 bytes
 .../tests/data01/subdirectory}/__init__.py | 0
 .../tests/data01/subdirectory/binary.file | Bin 0 -> 4 bytes
 .../tests/data01/utf-16.file | Bin 0 -> 44 bytes
 .../tests/data01/utf-8.file | 1 +
 .../tests/data02}/__init__.py | 0
 .../tests/data02/one}/__init__.py | 0
 .../tests/data02/one/resource1.txt | 1 +
 .../subdirectory/subsubdir/resource.txt | 1 +
 .../tests/data02/two/__init__.py} | 0
 .../tests/data02/two/resource2.txt | 1 +
 .../tests/namespacedata01/binary.file | Bin 0 -> 4 bytes
 .../tests/namespacedata01/utf-16.file | Bin 0 -> 44 bytes
 .../tests/namespacedata01/utf-8.file | 1 +
 .../tests/test_compatibilty_files.py | 104 +
 .../tests/test_contents.py | 43 +
 .../importlib_resources/tests/test_custom.py | 45 +
 .../importlib_resources/tests/test_files.py | 112 +
 .../importlib_resources/tests/test_open.py | 85 +
 .../importlib_resources/tests/test_path.py | 69 +
 .../importlib_resources/tests/test_read.py | 82 +
 .../importlib_resources/tests/test_reader.py | 144 +
 .../tests/test_resource.py | 252 +
 .../importlib_resources/tests/update-zips.py | 53 +
 .../aob_py3/importlib_resources/tests/util.py | 179 +
 .../tests/zipdata01/__init__.py} | 0
 .../tests/zipdata01/ziptestdata.zip | Bin 0 -> 876 bytes
 .../tests/zipdata02/__init__.py} | 0
 .../tests/zipdata02/ziptestdata.zip | Bin 0 -> 698 bytes
 .../aob_py3/jinja2/__init__.py | 0
 .../aob_py3/jinja2/_compat.py | 0
 .../aob_py3/jinja2/_identifier.py | 0
 .../aob_py3/jinja2/asyncfilters.py | 0
 .../aob_py3/jinja2/asyncsupport.py | 0
 .../aob_py3/jinja2/bccache.py | 0
 .../aob_py3/jinja2/compiler.py | 0
 .../aob_py3/jinja2/constants.py | 0
 .../aob_py3/jinja2/debug.py | 0
 .../aob_py3/jinja2/defaults.py | 0
 .../aob_py3/jinja2/environment.py | 0
 .../aob_py3/jinja2/exceptions.py | 0
 .../aob_py3/jinja2/ext.py | 0
 .../aob_py3/jinja2/filters.py | 0
 .../aob_py3/jinja2/idtracking.py | 0
 .../aob_py3/jinja2/lexer.py | 0
 .../aob_py3/jinja2/loaders.py | 0
 .../aob_py3/jinja2/meta.py | 0
 .../aob_py3/jinja2/nativetypes.py | 0
 .../aob_py3/jinja2/nodes.py | 0
 .../aob_py3/jinja2/optimizer.py | 0
 .../aob_py3/jinja2/parser.py | 0
 .../aob_py3/jinja2/runtime.py | 0
 .../aob_py3/jinja2/sandbox.py | 0
 .../aob_py3/jinja2/tests.py | 0
 .../aob_py3/jinja2/utils.py | 0
 .../aob_py3/jinja2/visitor.py | 0
 .../aob_py3/jsl-0.2.4.dist-info/RECORD | 2 +-
 .../aob_py3/jsl-0.2.4.dist-info/WHEEL | 2 +-
 .../aob_py3/jsl/__init__.py | 0
 .../aob_py3/jsl/_compat/__init__.py | 0
 .../aob_py3/jsl/_compat/ordereddict.py | 0
 .../aob_py3/jsl/_compat/prepareable.py | 0
 .../aob_py3/jsl/document.py | 0
 .../aob_py3/jsl/exceptions.py | 0
 .../aob_py3/jsl/fields/__init__.py | 0
 .../aob_py3/jsl/fields/base.py | 0
 .../aob_py3/jsl/fields/compound.py | 0
 .../aob_py3/jsl/fields/primitive.py | 0
 .../aob_py3/jsl/fields/util.py | 0
 .../aob_py3/jsl/registry.py | 0
 .../aob_py3/jsl/resolutionscope.py | 0
 .../aob_py3/jsl/roles.py | 0
 .../aob_py3/jsoncomment/__init__.py | 0
 .../aob_py3/jsoncomment/comments.py | 0
 .../aob_py3/jsoncomment/wrapper.py | 0
 .../aob_py3/jsonpath_ng/__init__.py | 0
 .../aob_py3/jsonpath_ng/bin/__init__.py | 0
 .../aob_py3/jsonpath_ng/bin/jsonpath.py | 0
 .../aob_py3/jsonpath_ng/exceptions.py | 0
 .../aob_py3/jsonpath_ng/ext/__init__.py | 0
 .../aob_py3/jsonpath_ng/ext/arithmetic.py | 0
 .../aob_py3/jsonpath_ng/ext/filter.py | 0
 .../aob_py3/jsonpath_ng/ext/iterable.py | 0
 .../aob_py3/jsonpath_ng/ext/parser.py | 0
 .../aob_py3/jsonpath_ng/ext/string.py | 0
 .../aob_py3/jsonpath_ng/jsonpath.py | 0
 .../aob_py3/jsonpath_ng/lexer.py | 0
 .../aob_py3/jsonpath_ng/parser.py | 0
 .../jsonpath_rw-1.4.0.dist-info/RECORD | 2 +-
 .../aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL | 2 +-
 .../aob_py3/jsonpath_rw/__init__.py | 0
 .../aob_py3/jsonpath_rw/bin/__init__.py | 0
 .../aob_py3/jsonpath_rw/bin/jsonpath.py | 0
 .../aob_py3/jsonpath_rw/jsonpath.py | 0
 .../aob_py3/jsonpath_rw/lexer.py | 0
 .../aob_py3/jsonpath_rw/parser.py | 0
 .../aob_py3/jsonschema-3.2.0.dist-info/RECORD | 37 -
 .../aob_py3/jsonschema-3.2.0.dist-info/WHEEL | 6 -
 .../COPYING | 0
 .../INSTALLER | 0
 .../METADATA | 134 +-
 .../aob_py3/jsonschema-4.4.0.dist-info/RECORD | 43 +
 .../REQUESTED | 0
 .../WHEEL | 0
 .../entry_points.txt | 0
 .../top_level.txt | 0
 .../aob_py3/jsonschema/__init__.py | 40 +-
 .../aob_py3/jsonschema/__main__.py | 1 +
 .../aob_py3/jsonschema/_format.py | 285 +-
 .../aob_py3/jsonschema/_legacy_validators.py | 153 +-
 .../aob_py3/jsonschema/_reflect.py | 10 +-
 .../aob_py3/jsonschema/_types.py | 69 +-
 .../aob_py3/jsonschema/_utils.py | 284 +-
 .../aob_py3/jsonschema/_validators.py | 316 +-
 .../aob_py3/jsonschema/benchmarks/__init__.py | 0
 .../aob_py3/jsonschema/benchmarks/issue232.py | 7 +-
 .../benchmarks/json_schema_test_suite.py | 2 -
 .../aob_py3/jsonschema/cli.py | 272 +-
 .../aob_py3/jsonschema/compat.py | 55 -
 .../aob_py3/jsonschema/exceptions.py | 129 +-
 .../aob_py3/jsonschema/protocols.py | 167 +
 .../jsonschema/schemas/draft2019-09.json | 42 +
 .../jsonschema/schemas/draft2020-12.json | 58 +
 .../aob_py3/jsonschema/schemas/draft3.json | 372 +-
 .../aob_py3/jsonschema/schemas/draft4.json | 263 +-
 .../jsonschema/schemas/vocabularies.json | 1 +
 .../aob_py3/jsonschema/tests/__init__.py | 0
 .../aob_py3/jsonschema/tests/_helpers.py | 0
 .../aob_py3/jsonschema/tests/_suite.py | 69 +-
 .../aob_py3/jsonschema/tests/fuzz_validate.py | 49 +
 .../aob_py3/jsonschema/tests/test_cli.py | 958 ++-
 .../jsonschema/tests/test_deprecations.py | 123 +
 .../jsonschema/tests/test_exceptions.py | 53 +-
 .../aob_py3/jsonschema/tests/test_format.py | 30 +-
 .../tests/test_jsonschema_test_suite.py | 346 +-
 .../aob_py3/jsonschema/tests/test_types.py | 89 +-
 .../aob_py3/jsonschema/tests/test_utils.py | 124 +
 .../jsonschema/tests/test_validators.py | 1219 ++-
 .../aob_py3/jsonschema/validators.py | 758 +-
 .../aob_py3/jsonspec/__init__.py | 0
 .../aob_py3/jsonspec/__main__.py | 0
 .../aob_py3/jsonspec/_version.py | 0
 .../aob_py3/jsonspec/cli.py | 0
 .../aob_py3/jsonspec/driver.py | 0
 .../aob_py3/jsonspec/misc/__init__.py | 0
 .../aob_py3/jsonspec/operations/__init__.py | 0
 .../aob_py3/jsonspec/operations/bases.py | 0
 .../aob_py3/jsonspec/operations/exceptions.py | 0
 .../aob_py3/jsonspec/pointer/__init__.py | 0
 .../aob_py3/jsonspec/pointer/bases.py | 0
 .../aob_py3/jsonspec/pointer/exceptions.py | 0
 .../aob_py3/jsonspec/pointer/stages.py | 0
 .../aob_py3/jsonspec/reference/__init__.py | 0
 .../aob_py3/jsonspec/reference/bases.py | 0
 .../aob_py3/jsonspec/reference/exceptions.py | 0
 .../aob_py3/jsonspec/reference/providers.py | 0
 .../aob_py3/jsonspec/reference/util.py | 0
 .../aob_py3/jsonspec/validators/__init__.py | 0
 .../aob_py3/jsonspec/validators/bases.py | 0
 .../aob_py3/jsonspec/validators/draft03.py | 0
 .../aob_py3/jsonspec/validators/draft04.py | 0
 .../aob_py3/jsonspec/validators/exceptions.py | 0
 .../aob_py3/jsonspec/validators/factorize.py | 0
 .../aob_py3/jsonspec/validators/formats.py | 0
 .../jsonspec/validators/pointer_util.py | 0
 .../aob_py3/jsonspec/validators/util.py | 0
 .../aob_py3/lib2to3/__init__.py | 0
 .../aob_py3/lib2to3/__main__.py | 0
 .../aob_py3/lib2to3/btm_matcher.py | 0
 .../aob_py3/lib2to3/btm_utils.py | 0
 .../aob_py3/lib2to3/fixer_base.py | 0
 .../aob_py3/lib2to3/fixer_util.py | 0
 .../aob_py3/lib2to3/fixes/__init__.py | 0
 .../aob_py3/lib2to3/fixes/fix_apply.py | 0
 .../aob_py3/lib2to3/fixes/fix_asserts.py | 0
 .../aob_py3/lib2to3/fixes/fix_basestring.py | 0
 .../aob_py3/lib2to3/fixes/fix_buffer.py | 0
 .../aob_py3/lib2to3/fixes/fix_dict.py | 0
 .../aob_py3/lib2to3/fixes/fix_except.py | 0
 .../aob_py3/lib2to3/fixes/fix_exec.py | 0
 .../aob_py3/lib2to3/fixes/fix_execfile.py | 0
 .../aob_py3/lib2to3/fixes/fix_exitfunc.py | 0
 .../aob_py3/lib2to3/fixes/fix_filter.py | 0
 .../aob_py3/lib2to3/fixes/fix_funcattrs.py | 0
 .../aob_py3/lib2to3/fixes/fix_future.py | 0
 .../aob_py3/lib2to3/fixes/fix_getcwdu.py | 0
 .../aob_py3/lib2to3/fixes/fix_has_key.py | 0
 .../aob_py3/lib2to3/fixes/fix_idioms.py | 0
 .../aob_py3/lib2to3/fixes/fix_import.py | 0
 .../aob_py3/lib2to3/fixes/fix_imports.py | 0
 .../aob_py3/lib2to3/fixes/fix_imports2.py | 0
 .../aob_py3/lib2to3/fixes/fix_input.py | 0
 .../aob_py3/lib2to3/fixes/fix_intern.py | 0
 .../aob_py3/lib2to3/fixes/fix_isinstance.py | 0
 .../aob_py3/lib2to3/fixes/fix_itertools.py | 0
 .../lib2to3/fixes/fix_itertools_imports.py | 0
 .../aob_py3/lib2to3/fixes/fix_long.py | 0
 .../aob_py3/lib2to3/fixes/fix_map.py | 0
 .../aob_py3/lib2to3/fixes/fix_metaclass.py | 0
 .../aob_py3/lib2to3/fixes/fix_methodattrs.py | 0
 .../aob_py3/lib2to3/fixes/fix_ne.py | 0
 .../aob_py3/lib2to3/fixes/fix_next.py | 0
 .../aob_py3/lib2to3/fixes/fix_nonzero.py | 0
 .../aob_py3/lib2to3/fixes/fix_numliterals.py | 0
 .../aob_py3/lib2to3/fixes/fix_operator.py | 0
 .../aob_py3/lib2to3/fixes/fix_paren.py | 0
 .../aob_py3/lib2to3/fixes/fix_print.py | 0
 .../aob_py3/lib2to3/fixes/fix_raise.py | 0
 .../aob_py3/lib2to3/fixes/fix_raw_input.py | 0
 .../aob_py3/lib2to3/fixes/fix_reduce.py | 0
 .../aob_py3/lib2to3/fixes/fix_reload.py | 0
 .../aob_py3/lib2to3/fixes/fix_renames.py | 0
 .../aob_py3/lib2to3/fixes/fix_repr.py | 0
 .../aob_py3/lib2to3/fixes/fix_set_literal.py | 0
 .../lib2to3/fixes/fix_standarderror.py | 0
 .../aob_py3/lib2to3/fixes/fix_sys_exc.py | 0
 .../aob_py3/lib2to3/fixes/fix_throw.py | 0
 .../aob_py3/lib2to3/fixes/fix_tuple_params.py | 0
 .../aob_py3/lib2to3/fixes/fix_types.py | 0
 .../aob_py3/lib2to3/fixes/fix_unicode.py | 0
 .../aob_py3/lib2to3/fixes/fix_urllib.py | 0
 .../aob_py3/lib2to3/fixes/fix_ws_comma.py | 0
 .../aob_py3/lib2to3/fixes/fix_xrange.py | 0
 .../aob_py3/lib2to3/fixes/fix_xreadlines.py | 0
 .../aob_py3/lib2to3/fixes/fix_zip.py | 0
 .../aob_py3/lib2to3/main.py | 0
 .../aob_py3/lib2to3/patcomp.py | 0
 .../aob_py3/lib2to3/pgen2/__init__.py | 0
 .../aob_py3/lib2to3/pgen2/conv.py | 0
 .../aob_py3/lib2to3/pgen2/driver.py | 0
 .../aob_py3/lib2to3/pgen2/grammar.py | 0
 .../aob_py3/lib2to3/pgen2/literals.py | 0
 .../aob_py3/lib2to3/pgen2/parse.py | 0
 .../aob_py3/lib2to3/pgen2/pgen.py | 0
 .../aob_py3/lib2to3/pgen2/tokenize.py | 0
 .../aob_py3/lib2to3/pygram.py | 0
 .../aob_py3/lib2to3/pytree.py | 0
 .../aob_py3/lib2to3/refactor.py | 0
 .../aob_py3/lib2to3/tests/__init__.py | 0
 .../aob_py3/lib2to3/tests/__main__.py | 0
 .../aob_py3/lib2to3/tests/data/bom.py | 0
 .../aob_py3/lib2to3/tests/data/crlf.py | 0
 .../lib2to3/tests/data/fixers/bad_order.py | 0
 .../tests/data/fixers/myfixes/__init__.py | 0
 .../tests/data/fixers/myfixes/fix_explicit.py | 0
 .../tests/data/fixers/myfixes/fix_first.py | 0
 .../tests/data/fixers/myfixes/fix_last.py | 0
 .../tests/data/fixers/myfixes/fix_parrot.py | 0
 .../tests/data/fixers/myfixes/fix_preorder.py | 0
 .../lib2to3/tests/data/fixers/no_fixer_cls.py | 0
 .../tests/data/fixers/parrot_example.py | 0
 .../lib2to3/tests/data/infinite_recursion.py | 0
 .../lib2to3/tests/data/py2_test_grammar.py | 0
 .../lib2to3/tests/data/py3_test_grammar.py | 0
 .../aob_py3/lib2to3/tests/support.py | 0
 .../aob_py3/lib2to3/tests/test_all_fixers.py | 0
 .../aob_py3/lib2to3/tests/test_fixers.py | 0
 .../aob_py3/lib2to3/tests/test_main.py | 0
 .../aob_py3/lib2to3/tests/test_parser.py | 0
 .../aob_py3/lib2to3/tests/test_pytree.py | 0
 .../aob_py3/lib2to3/tests/test_refactor.py | 0
 .../aob_py3/lib2to3/tests/test_util.py | 0
 .../aob_py3/libfuturize/__init__.py | 1 -
 .../aob_py3/libfuturize/fixer_util.py | 520 --
 .../aob_py3/libfuturize/fixes/__init__.py | 97 -
 .../aob_py3/libfuturize/fixes/fix_UserDict.py | 102 -
 .../libfuturize/fixes/fix_absolute_import.py | 91 -
 ...future__imports_except_unicode_literals.py | 26 -
 .../libfuturize/fixes/fix_basestring.py | 17 -
 .../aob_py3/libfuturize/fixes/fix_bytes.py | 24 -
 .../aob_py3/libfuturize/fixes/fix_cmp.py | 33 -
 .../aob_py3/libfuturize/fixes/fix_division.py | 12 -
 .../libfuturize/fixes/fix_division_safe.py | 104 -
 .../aob_py3/libfuturize/fixes/fix_execfile.py | 37 -
 .../libfuturize/fixes/fix_future_builtins.py | 59 -
 .../fixes/fix_future_standard_library.py | 24 -
 .../fix_future_standard_library_urllib.py | 28 -
 .../aob_py3/libfuturize/fixes/fix_input.py | 32 -
 .../libfuturize/fixes/fix_metaclass.py | 262 -
 .../libfuturize/fixes/fix_next_call.py | 104 -
 .../aob_py3/libfuturize/fixes/fix_object.py | 17 -
 .../libfuturize/fixes/fix_oldstr_wrap.py | 39 -
 .../fixes/fix_order___future__imports.py | 36 -
 .../aob_py3/libfuturize/fixes/fix_print.py | 94 -
 .../fixes/fix_print_with_import.py | 22 -
 .../aob_py3/libfuturize/fixes/fix_raise.py | 107 -
 .../fixes/fix_remove_old__future__imports.py | 26 -
 .../libfuturize/fixes/fix_unicode_keep_u.py | 24 -
 .../fixes/fix_unicode_literals_import.py | 18 -
 .../fixes/fix_xrange_with_import.py | 20 -
 .../aob_py3/libfuturize/main.py | 322 -
 .../aob_py3/libpasteurize/__init__.py | 1 -
 .../aob_py3/libpasteurize/fixes/__init__.py | 54 -
 .../libpasteurize/fixes/feature_base.py | 57 -
 .../fixes/fix_add_all__future__imports.py | 24 -
 .../fixes/fix_add_all_future_builtins.py | 37 -
 .../fix_add_future_standard_library_import.py | 23 -
 .../libpasteurize/fixes/fix_annotations.py | 48 -
 .../libpasteurize/fixes/fix_division.py | 28 -
 .../libpasteurize/fixes/fix_features.py | 86 -
 .../libpasteurize/fixes/fix_fullargspec.py | 16 -
 .../fixes/fix_future_builtins.py | 46 -
 .../aob_py3/libpasteurize/fixes/fix_getcwd.py | 26 -
 .../libpasteurize/fixes/fix_imports.py | 112 -
 .../libpasteurize/fixes/fix_imports2.py | 174 -
 .../aob_py3/libpasteurize/fixes/fix_kwargs.py | 147 -
 .../libpasteurize/fixes/fix_memoryview.py | 21 -
 .../libpasteurize/fixes/fix_metaclass.py | 78 -
 .../libpasteurize/fixes/fix_newstyle.py | 33 -
 .../aob_py3/libpasteurize/fixes/fix_next.py | 43 -
 .../libpasteurize/fixes/fix_printfunction.py | 17 -
 .../aob_py3/libpasteurize/fixes/fix_raise.py | 25 -
 .../aob_py3/libpasteurize/fixes/fix_raise_.py | 35 -
 .../aob_py3/libpasteurize/fixes/fix_throw.py | 23 -
 .../libpasteurize/fixes/fix_unpacking.py | 120 -
 .../aob_py3/libpasteurize/main.py | 204 -
 .../aob_py3/mako/__init__.py | 4 +-
 .../aob_py3/mako/_ast_util.py | 13 +-
 .../aob_py3/mako/ast.py | 29 +-
 .../aob_py3/mako/cache.py | 9 +-
 .../aob_py3/mako/cmd.py | 17 +-
 .../aob_py3/mako/codegen.py | 67 +-
 .../aob_py3/mako/compat.py | 130 +-
 .../aob_py3/mako/exceptions.py | 41 +-
 .../aob_py3/mako/ext/__init__.py | 0
 .../aob_py3/mako/ext/autohandler.py | 2 +-
 .../aob_py3/mako/ext/babelplugin.py | 11 +-
 .../aob_py3/mako/ext/beaker_cache.py | 4 +-
 .../aob_py3/mako/ext/extract.py | 22 +-
 .../aob_py3/mako/ext/linguaplugin.py | 30 +-
 .../aob_py3/mako/ext/preprocessors.py | 2 +-
 .../aob_py3/mako/ext/pygmentplugin.py | 19 +-
 .../aob_py3/mako/ext/turbogears.py | 6 +-
 .../aob_py3/mako/filters.py | 96 +-
 .../aob_py3/mako/lexer.py | 253 +-
 .../aob_py3/mako/lookup.py | 58 +-
 .../aob_py3/mako/parsetree.py | 117 +-
 .../aob_py3/mako/pygen.py | 68 +-
 .../aob_py3/mako/pyparser.py | 82 +-
 .../aob_py3/mako/runtime.py | 92 +-
 .../aob_py3/mako/template.py | 122 +-
 .../aob_py3/mako/testing/__init__.py | 0
 .../aob_py3/mako/testing/_config.py | 128 +
 .../aob_py3/mako/testing/assertions.py | 167 +
 .../aob_py3/mako/testing/config.py | 17 +
 .../aob_py3/mako/testing/exclusions.py | 80 +
 .../aob_py3/mako/testing/fixtures.py | 119 +
 .../aob_py3/mako/testing/helpers.py | 67 +
 .../aob_py3/mako/util.py | 64 +-
 .../aob_py3/markupsafe/__init__.py | 0
 .../aob_py3/markupsafe/_native.py | 0
 .../aob_py3/modinput_wrapper/__init__.py | 0
 .../aob_py3/munch-2.3.2.dist-info/RECORD | 2 +-
 .../aob_py3/munch-2.3.2.dist-info/WHEEL | 2 +-
 .../aob_py3/munch/__init__.py | 0
 .../aob_py3/munch/python3_compat.py | 0
 .../aob_py3/past/__init__.py | 90 -
 .../aob_py3/past/builtins/__init__.py | 72 -
 .../aob_py3/past/builtins/misc.py | 94 -
 .../aob_py3/past/builtins/noniterators.py | 272 -
 .../aob_py3/past/translation/__init__.py | 485 --
 .../aob_py3/past/types/__init__.py | 29 -
 .../aob_py3/past/types/basestring.py | 39 -
 .../aob_py3/past/types/olddict.py | 96 -
 .../aob_py3/past/types/oldstr.py | 135 -
 .../aob_py3/past/utils/__init__.py | 97 -
 .../aob_py3/pkg_resources/__init__.py | 3283 --------
 .../aob_py3/pkg_resources/_vendor/appdirs.py | 608 --
 .../_vendor/importlib_resources/_common.py | 104 -
 .../_vendor/importlib_resources/_itertools.py | 35 -
 .../pkg_resources/_vendor/jaraco/context.py | 213 -
 .../pkg_resources/_vendor/jaraco/functools.py | 525 -
 .../_vendor/jaraco/text/__init__.py | 599 --
 .../_vendor/more_itertools/__init__.py | 4 -
 .../_vendor/more_itertools/more.py | 4317 ----------
 .../_vendor/more_itertools/recipes.py | 698 --
 .../_vendor/packaging/__about__.py | 26 -
 .../_vendor/packaging/__init__.py | 25 -
 .../_vendor/packaging/_manylinux.py | 301 -
 .../_vendor/packaging/_musllinux.py | 136 -
 .../_vendor/packaging/_structures.py | 67 -
 .../_vendor/packaging/markers.py | 304 -
 .../_vendor/packaging/requirements.py | 146 -
 .../_vendor/packaging/specifiers.py | 828 --
 .../pkg_resources/_vendor/packaging/tags.py | 484 --
 .../pkg_resources/_vendor/packaging/utils.py | 136 -
 .../_vendor/packaging/version.py | 504 --
 .../pkg_resources/_vendor/pyparsing.py | 5742 -------------
 .../aob_py3/pkg_resources/extern/__init__.py | 76 -
 .../aob_py3/ply/__init__.py | 0
 .../aob_py3/ply/cpp.py | 0
 .../aob_py3/ply/ctokens.py | 0
 .../aob_py3/ply/lex.py | 0
 .../aob_py3/ply/yacc.py | 0
 .../aob_py3/ply/ygen.py | 0
 .../aob_py3/pyparsing-2.4.7.dist-info/LICENSE | 18 -
 .../pyparsing-2.4.7.dist-info/METADATA | 104 -
 .../aob_py3/pyparsing-2.4.7.dist-info/RECORD | 7 -
 .../aob_py3/pyparsing-2.4.7.dist-info/WHEEL | 6 -
 .../pyparsing-2.4.7.dist-info/top_level.txt | 1 -
 .../aob_py3/pyparsing.py | 7107 -----------------
 .../aob_py3/pyrsistent-0.18.1.dist-info/WHEEL | 6 -
 .../INSTALLER | 0
 .../LICENSE.mit | 0
 .../METADATA | 30 +-
 .../RECORD | 23 +-
 .../WHEEL | 2 +-
 .../top_level.txt | 1 -
 .../aob_py3/pyrsistent/__init__.py | 0
 .../aob_py3/pyrsistent/_checked_types.py | 0
 .../aob_py3/pyrsistent/_field_common.py | 0
 .../aob_py3/pyrsistent/_helpers.py | 0
 .../aob_py3/pyrsistent/_immutable.py | 8 +-
 .../aob_py3/pyrsistent/_pbag.py | 0
 .../aob_py3/pyrsistent/_pclass.py | 0
 .../aob_py3/pyrsistent/_pdeque.py | 0
 .../aob_py3/pyrsistent/_plist.py | 0
 .../aob_py3/pyrsistent/_pmap.py | 129 +-
 .../aob_py3/pyrsistent/_precord.py | 0
 .../aob_py3/pyrsistent/_pset.py | 0
 .../aob_py3/pyrsistent/_pvector.py | 0
 .../aob_py3/pyrsistent/_toolz.py | 4 +-
 .../aob_py3/pyrsistent/_transformations.py | 0
 .../aob_py3/pyrsistent/typing.py | 0
 .../aob_py3/pyrsistent/typing.pyi | 2 +-
 .../aob_py3/requests-2.26.0.dist-info/RECORD | 25 -
 .../aob_py3/requests-2.26.0.dist-info/WHEEL | 6 -
 .../INSTALLER | 0
 .../LICENSE | 0
 .../METADATA | 40 +-
 .../aob_py3/requests-2.28.0.dist-info/RECORD | 25 +
 .../requests-2.28.0.dist-info/REQUESTED | 0
 .../WHEEL | 0
 .../top_level.txt | 0
 .../aob_py3/requests/__init__.py | 90 +-
 .../aob_py3/requests/__version__.py | 20 +-
 .../aob_py3/requests/_internal_utils.py | 24 +-
 .../aob_py3/requests/adapters.py | 229 +-
 .../aob_py3/requests/api.py | 18 +-
 .../aob_py3/requests/auth.py | 156 +-
 .../aob_py3/requests/certs.py | 3 +-
 .../aob_py3/requests/compat.py | 90 +-
 .../aob_py3/requests/cookies.py | 152 +-
 .../aob_py3/requests/exceptions.py | 32 +-
 .../aob_py3/requests/help.py | 101 +-
 .../aob_py3/requests/hooks.py | 7 +-
 .../aob_py3/requests/models.py | 357 +-
 .../aob_py3/requests/packages.py | 16 +-
 .../aob_py3/requests/sessions.py | 288 +-
 .../aob_py3/requests/status_codes.py | 169 +-
 .../aob_py3/requests/structures.py | 12 +-
 .../aob_py3/requests/utils.py | 439 +-
 .../setuptools-60.7.1.dist-info/METADATA | 142 -
 .../setuptools-60.7.1.dist-info/RECORD | 175 -
 .../entry_points.txt | 56 -
 .../setuptools-60.7.1.dist-info/top_level.txt | 3 -
 .../aob_py3/setuptools/__init__.py | 244 -
 .../setuptools/_deprecation_warning.py | 7 -
 .../aob_py3/setuptools/_distutils/__init__.py | 24 -
 .../setuptools/_distutils/_collections.py | 56 -
 .../setuptools/_distutils/_msvccompiler.py | 561 --
 .../setuptools/_distutils/archive_util.py | 256 -
 .../setuptools/_distutils/bcppcompiler.py | 393 -
 .../setuptools/_distutils/ccompiler.py | 1123 ---
 .../aob_py3/setuptools/_distutils/cmd.py | 403 -
 .../setuptools/_distutils/command/__init__.py | 31 -
 .../setuptools/_distutils/command/bdist.py | 143 -
 .../_distutils/command/bdist_dumb.py | 123 -
 .../_distutils/command/bdist_msi.py | 749 --
 .../_distutils/command/bdist_rpm.py | 579 --
 .../_distutils/command/bdist_wininst.py | 377 -
 .../setuptools/_distutils/command/build.py | 157 -
 .../_distutils/command/build_clib.py | 209 -
 .../_distutils/command/build_ext.py | 755 --
 .../setuptools/_distutils/command/build_py.py | 392 -
 .../_distutils/command/build_scripts.py | 152 -
 .../setuptools/_distutils/command/check.py | 148 -
 .../setuptools/_distutils/command/clean.py | 76 -
 .../setuptools/_distutils/command/config.py | 344 -
 .../setuptools/_distutils/command/install.py | 781 --
 .../_distutils/command/install_data.py | 79 -
 .../_distutils/command/install_egg_info.py | 84 -
 .../_distutils/command/install_headers.py | 47 -
 .../_distutils/command/install_lib.py | 217 -
 .../_distutils/command/install_scripts.py | 60 -
 .../_distutils/command/py37compat.py | 30 -
 .../setuptools/_distutils/command/register.py | 304 -
 .../setuptools/_distutils/command/sdist.py | 494 --
 .../setuptools/_distutils/command/upload.py | 214 -
 .../aob_py3/setuptools/_distutils/config.py | 130 -
 .../aob_py3/setuptools/_distutils/core.py | 249 -
 .../setuptools/_distutils/cygwinccompiler.py | 362 -
 .../aob_py3/setuptools/_distutils/debug.py | 5 -
 .../aob_py3/setuptools/_distutils/dep_util.py | 92 -
 .../aob_py3/setuptools/_distutils/dir_util.py | 210 -
 .../aob_py3/setuptools/_distutils/dist.py | 1257 ---
 .../aob_py3/setuptools/_distutils/errors.py | 97 -
 .../setuptools/_distutils/extension.py | 240 -
 .../setuptools/_distutils/fancy_getopt.py | 457 --
 .../setuptools/_distutils/file_util.py | 238 -
 .../aob_py3/setuptools/_distutils/filelist.py | 355 -
 .../aob_py3/setuptools/_distutils/log.py | 81 -
 .../setuptools/_distutils/msvc9compiler.py | 788 --
 .../setuptools/_distutils/msvccompiler.py | 643 --
 .../setuptools/_distutils/py35compat.py | 19 -
 .../setuptools/_distutils/py38compat.py | 7 -
 .../aob_py3/setuptools/_distutils/spawn.py | 106 -
 .../setuptools/_distutils/sysconfig.py | 567 --
 .../setuptools/_distutils/text_file.py | 286 -
 .../setuptools/_distutils/unixccompiler.py | 325 -
 .../aob_py3/setuptools/_distutils/util.py | 496 --
 .../aob_py3/setuptools/_distutils/version.py | 363 -
 .../setuptools/_distutils/versionpredicate.py | 169 -
 .../aob_py3/setuptools/_imp.py | 82 -
 .../_vendor/more_itertools/__init__.py | 4 -
 .../setuptools/_vendor/more_itertools/more.py | 3825 ---------
 .../_vendor/more_itertools/recipes.py | 620 --
 .../aob_py3/setuptools/_vendor/ordered_set.py | 488 --
 .../setuptools/_vendor/packaging/__about__.py | 26 -
 .../setuptools/_vendor/packaging/__init__.py | 25 -
 .../_vendor/packaging/_manylinux.py | 301 -
 .../_vendor/packaging/_musllinux.py | 136 -
 .../_vendor/packaging/_structures.py | 67 -
 .../setuptools/_vendor/packaging/markers.py | 304 -
 .../_vendor/packaging/requirements.py | 146 -
 .../_vendor/packaging/specifiers.py | 828 --
 .../setuptools/_vendor/packaging/tags.py | 484 --
 .../setuptools/_vendor/packaging/utils.py | 136 -
 .../setuptools/_vendor/packaging/version.py | 504 --
 .../aob_py3/setuptools/_vendor/pyparsing.py | 5742 -------------
 .../aob_py3/setuptools/archive_util.py | 205 -
 .../aob_py3/setuptools/build_meta.py | 290 -
 .../aob_py3/setuptools/cli-32.exe | Bin 65536 -> 0 bytes
 .../aob_py3/setuptools/cli-64.exe | Bin 74752 -> 0 bytes
 .../aob_py3/setuptools/cli-arm64.exe | Bin 137216 -> 0 bytes
 .../aob_py3/setuptools/cli.exe | Bin 65536 -> 0 bytes
 .../aob_py3/setuptools/command/__init__.py | 8 -
 .../aob_py3/setuptools/command/alias.py | 78 -
 .../aob_py3/setuptools/command/bdist_egg.py | 456 --
 .../aob_py3/setuptools/command/bdist_rpm.py | 40 -
 .../aob_py3/setuptools/command/build_clib.py | 101 -
 .../aob_py3/setuptools/command/build_ext.py | 328 -
 .../aob_py3/setuptools/command/build_py.py | 242 -
 .../aob_py3/setuptools/command/develop.py | 193 -
 .../aob_py3/setuptools/command/dist_info.py | 36 -
 .../setuptools/command/easy_install.py | 2317 ------
 .../aob_py3/setuptools/command/egg_info.py | 755 --
 .../aob_py3/setuptools/command/install.py | 132 -
 .../setuptools/command/install_egg_info.py | 62 -
 .../aob_py3/setuptools/command/install_lib.py | 122 -
 .../setuptools/command/install_scripts.py | 69 -
 .../setuptools/command/launcher manifest.xml | 15 -
 .../aob_py3/setuptools/command/py36compat.py | 134 -
 .../aob_py3/setuptools/command/register.py | 18 -
 .../aob_py3/setuptools/command/rotate.py | 64 -
 .../aob_py3/setuptools/command/saveopts.py | 22 -
 .../aob_py3/setuptools/command/sdist.py | 196 -
 .../aob_py3/setuptools/command/setopt.py | 149 -
 .../aob_py3/setuptools/command/test.py | 252 -
 .../aob_py3/setuptools/command/upload.py | 17 -
 .../aob_py3/setuptools/command/upload_docs.py | 202 -
 .../aob_py3/setuptools/config.py | 751 --
 .../aob_py3/setuptools/dep_util.py | 25 -
 .../aob_py3/setuptools/depends.py | 176 -
 .../aob_py3/setuptools/dist.py | 1164 ---
 .../aob_py3/setuptools/errors.py | 40 -
 .../aob_py3/setuptools/extension.py | 55 -
 .../aob_py3/setuptools/extern/__init__.py | 73 -
 .../aob_py3/setuptools/glob.py | 167 -
 .../aob_py3/setuptools/gui-32.exe | Bin 65536 -> 0 bytes
 .../aob_py3/setuptools/gui-64.exe | Bin 75264 -> 0 bytes
 .../aob_py3/setuptools/gui-arm64.exe | Bin 137728 -> 0 bytes
 .../aob_py3/setuptools/gui.exe | Bin 65536 -> 0 bytes
 .../aob_py3/setuptools/installer.py | 104 -
 .../aob_py3/setuptools/launch.py | 36 -
 .../aob_py3/setuptools/logging.py | 36 -
 .../aob_py3/setuptools/monkey.py | 177 -
 .../aob_py3/setuptools/msvc.py | 1805 -----
 .../aob_py3/setuptools/namespaces.py | 107 -
 .../aob_py3/setuptools/package_index.py | 1127 ---
 .../aob_py3/setuptools/py34compat.py | 13 -
 .../aob_py3/setuptools/sandbox.py | 530 --
 .../aob_py3/setuptools/script (dev).tmpl | 6 -
 .../aob_py3/setuptools/script.tmpl | 3 -
 .../aob_py3/setuptools/unicode_utils.py | 42 -
 .../aob_py3/setuptools/version.py | 6 -
 .../aob_py3/setuptools/wheel.py | 213 -
 .../aob_py3/setuptools/windows_support.py | 29 -
 .../splunk_ta_cisco_dnacenter/aob_py3/six.py | 0
 .../aob_py3/socks.py | 0
 .../aob_py3/sockshandler.py | 0
 .../INSTALLER | 0
 .../aob_py3/solnlib-4.7.0.dist-info/LICENSE | 201 +
 .../METADATA | 8 +-
 .../RECORD | 26 +-
 .../aob_py3/solnlib-4.7.0.dist-info/REQUESTED | 0
 .../aob_py3/solnlib-4.7.0.dist-info/WHEEL | 4 +
 .../aob_py3/solnlib/__init__.py | 2 +-
 .../aob_py3/solnlib/_utils.py | 0
 .../aob_py3/solnlib/acl.py | 0
 .../aob_py3/solnlib/conf_manager.py | 71 +-
 .../aob_py3/solnlib/credentials.py | 101 +-
 .../aob_py3/solnlib/file_monitor.py | 16 +-
 .../aob_py3/solnlib/hec_config.py | 0
 .../aob_py3/solnlib/log.py | 0
 .../aob_py3/solnlib/modular_input/__init__.py | 0
 .../solnlib/modular_input/checkpointer.py | 0
 .../aob_py3/solnlib/modular_input/event.py | 0
 .../solnlib/modular_input/event_writer.py | 0
 .../solnlib/modular_input/modular_input.py | 6 +-
 .../aob_py3/solnlib/net_utils.py | 0
 .../aob_py3/solnlib/orphan_process_monitor.py | 0
 .../aob_py3/solnlib/pattern.py | 0
 .../aob_py3/solnlib/server_info.py | 0
 .../aob_py3/solnlib/splunk_rest_client.py | 0
 .../aob_py3/solnlib/splunkenv.py | 2 -
 .../aob_py3/solnlib/time_parser.py | 0
 .../aob_py3/solnlib/timer_queue.py | 0
 .../aob_py3/solnlib/user_access.py | 0
 .../aob_py3/solnlib/utils.py | 27 +-
 .../aob_py3/sortedcontainers/__init__.py | 0
 .../aob_py3/sortedcontainers/sorteddict.py | 0
 .../aob_py3/sortedcontainers/sortedlist.py | 0
 .../aob_py3/sortedcontainers/sortedset.py | 0
 .../aob_py3/splunk_aoblib/__init__.py | 0
 .../aob_py3/splunk_aoblib/utility.py | 0
 .../INSTALLER | 0
 .../METADATA | 4 +-
 .../RECORD | 26 +-
 .../splunk_sdk-1.6.18.dist-info/REQUESTED | 0
 .../aob_py3/splunk_sdk-1.6.18.dist-info/WHEEL | 5 +
 .../top_level.txt | 0
 .../aob_py3/splunklib/__init__.py | 2 +-
 .../aob_py3/splunklib/binding.py | 20 +-
 .../aob_py3/splunklib/client.py | 48 +-
 .../aob_py3/splunklib/data.py | 0
 .../splunklib/modularinput/__init__.py | 0
 .../splunklib/modularinput/argument.py | 0
 .../aob_py3/splunklib/modularinput/event.py | 0
 .../splunklib/modularinput/event_writer.py | 1 -
 .../modularinput/input_definition.py | 0
 .../aob_py3/splunklib/modularinput/scheme.py | 0
 .../aob_py3/splunklib/modularinput/script.py | 0
 .../aob_py3/splunklib/modularinput/utils.py | 0
 .../modularinput/validation_definition.py | 0
 .../aob_py3/splunklib/ordereddict.py | 0
 .../aob_py3/splunklib/results.py | 0
 .../splunklib/searchcommands/__init__.py | 0
 .../splunklib/searchcommands/decorators.py | 0
 .../splunklib/searchcommands/environment.py | 0
 .../searchcommands/eventing_command.py | 0
 .../searchcommands/external_search_command.py | 0
 .../searchcommands/generating_command.py | 45 +-
 .../splunklib/searchcommands/internals.py | 3 +
 .../searchcommands/reporting_command.py | 0
 .../searchcommands/search_command.py | 31 +-
 .../searchcommands/streaming_command.py | 0
 .../splunklib/searchcommands/validators.py | 0
 .../aob_py3/splunklib/six.py | 0
 .../INSTALLER | 0
 .../LICENSE | 0
 .../METADATA | 4 +-
 .../RECORD | 26 +-
 .../WHEEL | 0
 .../aob_py3/splunktalib/__init__.py | 2 +-
 .../aob_py3/splunktalib/common/__init__.py | 0
 .../aob_py3/splunktalib/common/consts.py | 0
 .../aob_py3/splunktalib/common/log.py | 0
 .../aob_py3/splunktalib/common/pattern.py | 0
 .../aob_py3/splunktalib/common/util.py | 0
 .../splunktalib/common/xml_dom_parser.py | 1 -
 .../splunktalib/concurrent/__init__.py | 0
 .../concurrent/concurrent_executor.py | 0
 .../splunktalib/concurrent/process_pool.py | 0
 .../splunktalib/concurrent/thread_pool.py | 0
 .../splunktalib/conf_manager/__init__.py | 0
 .../conf_manager/conf_endpoints.py | 0
 .../splunktalib/conf_manager/conf_manager.py | 0
 .../conf_manager/data_input_endpoints.py | 0
 .../conf_manager/property_endpoints.py | 0
 .../splunktalib/conf_manager/request.py | 22 +-
 .../conf_manager/ta_conf_manager.py | 0
 .../aob_py3/splunktalib/credentials.py | 34 +-
 .../aob_py3/splunktalib/event_writer.py | 0
 .../aob_py3/splunktalib/file_monitor.py | 0
 .../aob_py3/splunktalib/kv_client.py | 14 +-
 .../aob_py3/splunktalib/modinput.py | 2 +-
 .../splunktalib/orphan_process_monitor.py | 0
 .../aob_py3/splunktalib/rest.py | 117 +-
 .../aob_py3/splunktalib/schedule/__init__.py | 0
 .../aob_py3/splunktalib/schedule/job.py | 0
 .../aob_py3/splunktalib/schedule/scheduler.py | 0
 .../aob_py3/splunktalib/splunk_cluster.py | 6 +-
 .../aob_py3/splunktalib/splunk_platform.py | 0
 .../aob_py3/splunktalib/state_store.py | 0
 .../aob_py3/splunktalib/timer.py | 0
 .../aob_py3/splunktalib/timer_queue.py | 0
 .../INSTALLER | 0
 .../LICENSE | 0
 .../METADATA | 6 +-
 .../RECORD | 26 +-
 .../splunktaucclib-6.0.0.dist-info/REQUESTED | 0
 .../WHEEL | 0
 .../aob_py3/splunktaucclib/__init__.py | 2 +-
 .../splunktaucclib/alert_actions_base.py | 86 +-
 .../aob_py3/splunktaucclib/cim_actions.py | 0
 .../aob_py3/splunktaucclib/common/__init__.py | 0
 .../aob_py3/splunktaucclib/common/log.py | 0
 .../aob_py3/splunktaucclib/config.py | 27 +-
 .../data_collection/__init__.py | 0
 .../data_collection/ta_checkpoint_manager.py | 0
 .../data_collection/ta_config.py | 0
 .../data_collection/ta_consts.py | 0
 .../data_collection/ta_data_client.py | 0
 .../data_collection/ta_data_collector.py | 0
 .../data_collection/ta_data_loader.py | 0
 .../data_collection/ta_helper.py | 0
 .../data_collection/ta_mod_input.py | 0
 .../splunktaucclib/global_config/__init__.py | 0
 .../global_config/configuration.py | 0
 .../splunktaucclib/global_config/schema.py | 0
 .../modinput_wrapper/__init__.py | 0
 .../modinput_wrapper/base_modinput.py | 0
 .../splunktaucclib/rest_handler/__init__.py | 0
 .../rest_handler/admin_external.py | 0
 .../splunktaucclib/rest_handler/base.py | 13 +-
 .../rest_handler/base_hook_mixin.py | 0
 .../splunktaucclib/rest_handler/cred_mgmt.py | 0
 .../rest_handler/credentials.py | 0
 .../splunktaucclib/rest_handler/datainput.py | 0
 .../splunktaucclib/rest_handler/eai.py | 0
 .../rest_handler/endpoint/__init__.py | 0
 .../rest_handler/endpoint/converter.py | 0
 .../rest_handler/endpoint/field.py | 0
 .../rest_handler/endpoint/validator.py | 0
 .../splunktaucclib/rest_handler/entity.py | 0
 .../splunktaucclib/rest_handler/error.py | 0
 .../splunktaucclib/rest_handler/error_ctl.py | 0
 .../splunktaucclib/rest_handler/handler.py | 0
 .../splunktaucclib/rest_handler/multimodel.py | 0
 .../splunktaucclib/rest_handler/normaliser.py | 0
 .../splunktaucclib/rest_handler/poster.py | 37 +-
 .../splunktaucclib/rest_handler/schema.py | 0
 .../splunktaucclib/rest_handler/teardown.py | 12 +-
 .../splunktaucclib/rest_handler/util.py | 48 +-
 .../splunktaucclib/rest_handler/validator.py | 0
 .../splunktaucclib/splunk_aoblib/__init__.py | 0
 .../splunk_aoblib/rest_helper.py | 0
 .../splunk_aoblib/rest_migration.py | 0
 .../splunk_aoblib/setup_util.py | 0
 .../splunktaucclib/splunk_aoblib/utility.py | 0
 .../INSTALLER | 1 -
 .../METADATA | 35 -
 .../typing_extensions-4.0.1.dist-info/RECORD | 6 -
 .../INSTALLER | 0
 .../LICENSE | 20 +-
 .../METADATA | 190 +
 .../typing_extensions-4.5.0.dist-info/RECORD | 6 +
 .../WHEEL | 2 +-
 .../aob_py3/typing_extensions.py | 2116 ++---
 .../INSTALLER | 0
 .../LICENSE.txt | 0
 .../METADATA | 79 +-
 .../aob_py3/urllib3-1.26.15.dist-info/RECORD | 44 +
 .../WHEEL | 2 +-
 .../top_level.txt | 0
 .../urllib3-1.26.8.dist-info/INSTALLER | 1 -
 .../aob_py3/urllib3-1.26.8.dist-info/RECORD | 44 -
 .../aob_py3/urllib3-1.26.8.dist-info/WHEEL | 6 -
 .../aob_py3/urllib3/__init__.py | 17 +
 .../aob_py3/urllib3/_collections.py | 0
 .../aob_py3/urllib3/_version.py | 2 +-
 .../aob_py3/urllib3/connection.py | 13 +-
 .../aob_py3/urllib3/connectionpool.py | 6 +-
 .../aob_py3/urllib3/contrib/__init__.py | 0
 .../urllib3/contrib/_appengine_environ.py | 0
 .../contrib/_securetransport/__init__.py | 0
 .../contrib/_securetransport/bindings.py | 0
 .../contrib/_securetransport/low_level.py | 0
 .../aob_py3/urllib3/contrib/appengine.py | 2 +-
 .../aob_py3/urllib3/contrib/ntlmpool.py | 4 +-
 .../aob_py3/urllib3/contrib/pyopenssl.py | 17 +-
 .../urllib3/contrib/securetransport.py | 1 -
 .../aob_py3/urllib3/contrib/socks.py | 0
 .../aob_py3/urllib3/exceptions.py | 0
 .../aob_py3/urllib3/fields.py | 0
 .../aob_py3/urllib3/filepost.py | 0
 .../aob_py3/urllib3/packages/__init__.py | 0
 .../urllib3/packages/backports/__init__.py | 0
 .../urllib3/packages/backports/makefile.py | 0
 .../aob_py3/urllib3/packages/six.py | 1 -
 .../aob_py3/urllib3/poolmanager.py | 1 +
 .../aob_py3/urllib3/request.py | 0
 .../aob_py3/urllib3/response.py | 72 +-
 .../aob_py3/urllib3/util/__init__.py | 0
 .../aob_py3/urllib3/util/connection.py | 0
 .../aob_py3/urllib3/util/proxy.py | 0
 .../aob_py3/urllib3/util/queue.py | 0
 .../aob_py3/urllib3/util/request.py | 5 +-
 .../aob_py3/urllib3/util/response.py | 0
 .../aob_py3/urllib3/util/retry.py | 2 +-
 .../aob_py3/urllib3/util/ssl_.py | 0
 .../urllib3/util/ssl_match_hostname.py | 10 +-
 .../aob_py3/urllib3/util/ssltransport.py | 0
 .../aob_py3/urllib3/util/timeout.py | 9 +-
 .../aob_py3/urllib3/util/url.py | 11 +-
 .../aob_py3/urllib3/util/wait.py | 1 -
 .../INSTALLER | 0
 .../LICENSE | 0
 .../aob_py3/zipp-3.15.0.dist-info/METADATA | 106 +
 .../aob_py3/zipp-3.15.0.dist-info/RECORD | 8 +
 .../aob_py3/zipp-3.15.0.dist-info/WHEEL | 5 +
 .../top_level.txt | 0
 .../aob_py3/zipp-3.7.0.dist-info/INSTALLER | 1 -
 .../aob_py3/zipp-3.7.0.dist-info/LICENSE | 19 -
 .../aob_py3/zipp-3.7.0.dist-info/METADATA | 58 -
 .../aob_py3/zipp-3.7.0.dist-info/RECORD | 7 -
 .../aob_py3/zipp-3.7.0.dist-info/WHEEL | 5 -
 .../splunk_ta_cisco_dnacenter/aob_py3/zipp.py | 329 -
 .../_vendor/zipp.py => zipp/__init__.py} | 121 +-
 .../aob_py3/zipp/py310compat.py | 12 +
 .../bin/splunk_ta_cisco_dnacenter_declare.py | 0
 .../default/addon_builder.conf | 5 +-
 .../splunk_ta_cisco_dnacenter_settings.conf | 1 +
 Splunk-TA-cisco-dnacenter/static/appIcon.png | Bin 2225 -> 2289 bytes
 .../static/appIconAlt.png | Bin 2225 -> 2289 bytes
 .../static/appIconAlt_2x.png | Bin 6754 -> 8428 bytes
 .../static/appIcon_2x.png | Bin 6754 -> 8428 bytes
 1188 files changed, 16066 insertions(+), 126281 deletions(-)
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_account.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_clienthealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_compliance.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_devicehealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_issue.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_networkhealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_securityadvisory.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_settings.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_clienthealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_compliance.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_devicehealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_issue.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_networkhealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_securityadvisory.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_clienthealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_compliance.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_devicehealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_issue.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_networkhealth.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_securityadvisory.py
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/AUTHORS
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/RECORD
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/entry_points.txt
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{Mako-1.1.0.dist-info => Mako-1.2.4.dist-info}/INSTALLER (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{Mako-1.1.0.dist-info => Mako-1.2.4.dist-info}/LICENSE (93%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{Mako-1.1.0.dist-info => Mako-1.2.4.dist-info}/METADATA (78%)
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/RECORD
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{Mako-1.1.0.dist-info => Mako-1.2.4.dist-info}/REQUESTED (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{importlib_metadata-4.10.1.dist-info => Mako-1.2.4.dist-info}/WHEEL (65%)
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/entry_points.txt
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{Mako-1.1.0.dist-info => Mako-1.2.4.dist-info}/top_level.txt (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{cloudconnectlib-3.1.0b3.dist-info => MarkupSafe-2.0.1.dist-info}/REQUESTED (100%)
 delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/__init__.py
 delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/override.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_pyrsistent_version.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_compat.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_config.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_funcs.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_make.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_next_gen.py
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_typing_compat.pyi
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_version_info.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/exceptions.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.py
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/AUTHORS.rst
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/METADATA
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/RECORD
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/top_level.txt
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{attrs-21.4.0.dist-info => attrs-23.1.0.dist-info}/INSTALLER (100%)
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/METADATA
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/RECORD
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{cloudconnectlib-3.1.0b3.dist-info => attrs-23.1.0.dist-info}/WHEEL (67%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{attrs-21.4.0.dist-info => attrs-23.1.0.dist-info/licenses}/LICENSE (94%)
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/converters.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/exceptions.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/filters.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/setters.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/validators.py
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/futurize
 mode change 100644 => 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/json
 mode change 100644 => 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/jsonpath_ng
 mode change 100644 => 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/jsonschema
 mode change 100644 => 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/mako-render
 mode change 100644 => 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/normalizer
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/pasteurize
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/RECORD
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/WHEEL
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{certifi-2021.10.8.dist-info => certifi-2022.12.7.dist-info}/INSTALLER (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{certifi-2021.10.8.dist-info => certifi-2022.12.7.dist-info}/LICENSE (91%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{certifi-2021.10.8.dist-info => certifi-2022.12.7.dist-info}/METADATA (85%)
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/RECORD
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{idna-3.3.dist-info => certifi-2022.12.7.dist-info}/WHEEL (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{certifi-2021.10.8.dist-info => certifi-2022.12.7.dist-info}/top_level.txt (100%)
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__init__.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__main__.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/core.py
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{future/backports/email/mime/__init__.py => certifi/py.typed} (100%) mode change 100755 => 100644
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{charset_normalizer-2.0.11.dist-info => charset_normalizer-2.0.12.dist-info}/INSTALLER (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{charset_normalizer-2.0.11.dist-info => charset_normalizer-2.0.12.dist-info}/LICENSE (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{charset_normalizer-2.0.11.dist-info => charset_normalizer-2.0.12.dist-info}/METADATA (99%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{charset_normalizer-2.0.11.dist-info => charset_normalizer-2.0.12.dist-info}/RECORD (64%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{Mako-1.1.0.dist-info => charset_normalizer-2.0.12.dist-info}/WHEEL (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{charset_normalizer-2.0.11.dist-info => charset_normalizer-2.0.12.dist-info}/entry_points.txt (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{charset_normalizer-2.0.11.dist-info => charset_normalizer-2.0.12.dist-info}/top_level.txt (100%)
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/__init__.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/api.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/assets/__init__.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cd.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cli/__init__.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cli/normalizer.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/constant.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/legacy.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/md.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/models.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/utils.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/version.py
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/RECORD
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{cloudconnectlib-3.1.0b3.dist-info => cloudconnectlib-3.1.3.dist-info}/INSTALLER (100%)
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{solnlib-4.4.1.dist-info =>
cloudconnectlib-3.1.3.dist-info}/LICENSE (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{cloudconnectlib-3.1.0b3.dist-info => cloudconnectlib-3.1.3.dist-info}/METADATA (65%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/RECORD rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{jsonschema-3.2.0.dist-info => cloudconnectlib-3.1.3.dist-info}/REQUESTED (100%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/WHEEL mode change 100644 => 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/configuration/schema_1_0_0.json delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/checkpoint.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine_v2.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/job.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/plugin.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/task.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/decorator.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/ElementTree.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/cElementTree.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/common.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/expatbuilder.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/expatreader.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/lxml.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/minidom.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/pulldom.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/sax.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/xmlrpc.py delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/distutils-precedence.pth delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/LICENSE.txt delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/METADATA delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/RECORD delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/entry_points.txt delete mode 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/top_level.txt delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/_markupbase.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/datetime.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/_encoded_words.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/_header_value_parser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/_parseaddr.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/_policybase.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/base64mime.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/charset.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/encoders.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/errors.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/feedparser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/generator.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/header.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/headerregistry.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/iterators.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/message.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/application.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/audio.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/base.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/image.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/message.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/multipart.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/nonmultipart.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/text.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/parser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/policy.py 
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/quoprimime.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/utils.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/entities.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/parser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/client.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookiejar.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookies.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/server.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/misc.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socket.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socketserver.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/total_ordering.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/error.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/parse.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/request.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/response.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/robotparser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/client.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/server.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/disabled.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/iterators.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/misc.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/new_min_max.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newnext.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newround.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newsuper.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_dummy_thread.py 
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_markupbase.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_thread.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/builtins.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/collections.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/configparser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/copyreg.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/dumb.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/gnu.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/ndbm.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/entities.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/parser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/client.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookiejar.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookies.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/server.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/itertools.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/pickle.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/queue.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/reprlib.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/socketserver.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/subprocess.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/sys.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/colorchooser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/commondialog.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/constants.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dialog.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dnd.py delete mode 100755 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/filedialog.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/font.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/messagebox.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/scrolledtext.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/simpledialog.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/tix.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/ttk.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/error.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/parse.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/request.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/response.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/robotparser.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/winreg.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/client.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/server.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/standard_library/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/tests/base.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newbytes.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newdict.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newint.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newlist.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newmemoryview.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newobject.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newopen.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newrange.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newstr.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/surrogateescape.py delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/LICENSE delete mode 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/METADATA delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/RECORD delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/top_level.txt delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/__init__.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/auth.py delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/cacerts.txt delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/certs.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/error.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/iri2uri.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/socks.py delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/RECORD delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/top_level.txt rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{future-0.18.2.dist-info => idna-3.4.dist-info}/INSTALLER (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{idna-3.3.dist-info => idna-3.4.dist-info}/LICENSE.md (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{idna-3.3.dist-info => idna-3.4.dist-info}/METADATA (52%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/RECORD rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{typing_extensions-4.0.1.dist-info => idna-3.4.dist-info}/WHEEL (72%) mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/codec.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/compat.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/core.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/idnadata.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/intranges.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/package_data.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/uts46data.py delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/LICENSE delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/RECORD rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{httplib2-0.19.1.dist-info => importlib_metadata-6.6.0.dist-info}/INSTALLER (100%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/LICENSE rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{importlib_metadata-4.10.1.dist-info => importlib_metadata-6.6.0.dist-info}/METADATA (66%) create mode 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/RECORD rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{setuptools-60.7.1.dist-info => importlib_metadata-6.6.0.dist-info}/WHEEL (65%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{importlib_metadata-4.10.1.dist-info => importlib_metadata-6.6.0.dist-info}/top_level.txt (100%) mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_adapters.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_collections.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_compat.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_functools.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_itertools.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_meta.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_py39compat.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_text.py rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{idna-3.3.dist-info => importlib_resources-5.12.0.dist-info}/INSTALLER (100%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/LICENSE create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/METADATA create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/RECORD rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunk_sdk-1.6.16.dist-info => importlib_resources-5.12.0.dist-info}/WHEEL (65%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/top_level.txt rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => }/importlib_resources/__init__.py (100%) mode change 100755 => 100644 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => }/importlib_resources/_adapters.py (97%) mode change 100755 => 100644 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_common.py rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => }/importlib_resources/_compat.py (88%) mode change 100755 => 100644 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_itertools.py rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => }/importlib_resources/_legacy.py (98%) mode change 100755 => 100644 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => }/importlib_resources/abc.py (66%) mode change 100755 => 100644 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{future/backports/http/__init__.py => importlib_resources/py.typed} 
(100%) mode change 100755 => 100644 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => }/importlib_resources/readers.py (68%) mode change 100755 => 100644 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => }/importlib_resources/simple.py (79%) mode change 100755 => 100644 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{future/backports/urllib => importlib_resources/tests}/__init__.py (100%) mode change 100755 => 100644 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_compat.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_path.py rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{future/moves/xmlrpc => importlib_resources/tests/data01}/__init__.py (100%) mode change 100755 => 100644 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/binary.file rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{future/tests => importlib_resources/tests/data01/subdirectory}/__init__.py (100%) mode change 100755 => 100644 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/subdirectory/binary.file create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/utf-16.file create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/utf-8.file rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor => importlib_resources/tests/data02}/__init__.py (100%) mode change 100755 => 100644 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{setuptools/_vendor => importlib_resources/tests/data02/one}/__init__.py (100%) mode change 100755 => 100644 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/one/resource1.txt create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{requests-2.26.0.dist-info/REQUESTED => importlib_resources/tests/data02/two/__init__.py} (100%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/two/resource2.txt create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/binary.file create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/utf-16.file create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/utf-8.file create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_compatibilty_files.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_contents.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_custom.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_files.py create mode 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_open.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_path.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_read.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_reader.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_resource.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/update-zips.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/util.py rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{solnlib-4.4.1.dist-info/REQUESTED => importlib_resources/tests/zipdata01/__init__.py} (100%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata01/ziptestdata.zip rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunk_sdk-1.6.16.dist-info/REQUESTED => importlib_resources/tests/zipdata02/__init__.py} (100%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata02/ziptestdata.zip mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/_compat.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/_identifier.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/asyncfilters.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/asyncsupport.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/bccache.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/compiler.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/constants.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/debug.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/defaults.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/environment.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/ext.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/filters.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/idtracking.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/lexer.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/loaders.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/meta.py mode change 100755 => 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/nativetypes.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/nodes.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/optimizer.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/parser.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/runtime.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/sandbox.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/tests.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/utils.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/visitor.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/ordereddict.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/prepareable.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/document.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/base.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/compound.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/primitive.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/util.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/registry.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/resolutionscope.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/roles.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/comments.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/wrapper.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/bin/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/bin/jsonpath.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/__init__.py mode change 100755 => 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/arithmetic.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/filter.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/iterable.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/parser.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/string.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/jsonpath.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/lexer.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/parser.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/bin/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/bin/jsonpath.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/jsonpath.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/lexer.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/parser.py delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/RECORD delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/WHEEL rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{jsonschema-3.2.0.dist-info => jsonschema-4.4.0.dist-info}/COPYING (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{importlib_metadata-4.10.1.dist-info => jsonschema-4.4.0.dist-info}/INSTALLER (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{jsonschema-3.2.0.dist-info => jsonschema-4.4.0.dist-info}/METADATA (61%) create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/RECORD rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktaucclib-5.0.7.dist-info => jsonschema-4.4.0.dist-info}/REQUESTED (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{charset_normalizer-2.0.11.dist-info => jsonschema-4.4.0.dist-info}/WHEEL (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{jsonschema-3.2.0.dist-info => jsonschema-4.4.0.dist-info}/entry_points.txt (100%) rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{jsonschema-3.2.0.dist-info => jsonschema-4.4.0.dist-info}/top_level.txt (100%) mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__main__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_format.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_legacy_validators.py mode change 100755 => 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_reflect.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_types.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_utils.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_validators.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/issue232.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/cli.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/compat.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/exceptions.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/protocols.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2019-09.json create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2020-12.json create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/vocabularies.json mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_helpers.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_suite.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/fuzz_validate.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_cli.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_deprecations.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_format.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_types.py create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_utils.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_validators.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/validators.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/__main__.py mode change 100755 => 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/_version.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/cli.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/driver.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/misc/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/bases.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/bases.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/stages.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/bases.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/providers.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/util.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/bases.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/draft03.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/draft04.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/exceptions.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/factorize.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/formats.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/pointer_util.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/util.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/__main__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/btm_matcher.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/btm_utils.py mode change 100755 => 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixer_base.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixer_util.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/__init__.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_apply.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_asserts.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_basestring.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_buffer.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_dict.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_except.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_exec.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_execfile.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_exitfunc.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_filter.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_funcattrs.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_future.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_getcwdu.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_has_key.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_idioms.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_import.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_imports.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_imports2.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_input.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_intern.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_isinstance.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_itertools.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_itertools_imports.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_long.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_map.py mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_metaclass.py mode change 100755 => 100644 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_methodattrs.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_ne.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_next.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_nonzero.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_numliterals.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_operator.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_paren.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_print.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_raise.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_raw_input.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_reduce.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_reload.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_renames.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_repr.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_set_literal.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_standarderror.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_sys_exc.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_throw.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_tuple_params.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_types.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_unicode.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_urllib.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_ws_comma.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_xrange.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_xreadlines.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_zip.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/main.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/patcomp.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/conv.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/driver.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/grammar.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/literals.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/parse.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/pgen.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/tokenize.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pygram.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pytree.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/refactor.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/__main__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/bom.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/crlf.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/bad_order.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_explicit.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_first.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_last.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_parrot.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_preorder.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/no_fixer_cls.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/parrot_example.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/infinite_recursion.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/py2_test_grammar.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/py3_test_grammar.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/support.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_all_fixers.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_fixers.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_main.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_parser.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_pytree.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_refactor.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixer_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_UserDict.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_absolute_import.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_basestring.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_bytes.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_cmp.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division_safe.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_execfile.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_builtins.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library_urllib.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_input.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_metaclass.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_next_call.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_object.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_oldstr_wrap.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_order___future__imports.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print_with_import.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_raise.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_remove_old__future__imports.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_keep_u.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_literals_import.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_xrange_with_import.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/main.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/feature_base.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all_future_builtins.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_future_standard_library_import.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_annotations.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_division.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_features.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_fullargspec.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_future_builtins.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_getcwd.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports2.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_kwargs.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_memoryview.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_metaclass.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_newstyle.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_next.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_printfunction.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise_.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_throw.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_unpacking.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/main.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/_ast_util.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ast.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cache.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cmd.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/codegen.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/compat.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/exceptions.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/autohandler.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/babelplugin.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/beaker_cache.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/extract.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/linguaplugin.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/preprocessors.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/pygmentplugin.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/turbogears.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/filters.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lexer.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lookup.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/parsetree.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pygen.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pyparser.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/runtime.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/template.py
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/__init__.py
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/_config.py
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/assertions.py
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/config.py
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/exclusions.py
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/fixtures.py
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/helpers.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/util.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/_native.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/modinput_wrapper/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch/python3_compat.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/misc.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/noniterators.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/translation/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/basestring.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/olddict.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/oldstr.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/utils/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/appdirs.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_common.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_itertools.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/context.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/functools.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/text/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/more.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/recipes.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__about__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_manylinux.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_musllinux.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_structures.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/markers.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/requirements.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/specifiers.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/tags.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/utils.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/version.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/pyparsing.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/extern/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/cpp.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/ctokens.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/lex.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/yacc.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/ygen.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/LICENSE
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/METADATA
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/RECORD
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/WHEEL
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/top_level.txt
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/WHEEL
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{jsonschema-3.2.0.dist-info => pyrsistent-0.19.3.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pyrsistent-0.18.1.dist-info => pyrsistent-0.19.3.dist-info}/LICENSE.mit (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pyrsistent-0.18.1.dist-info => pyrsistent-0.19.3.dist-info}/METADATA (96%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pyrsistent-0.18.1.dist-info => pyrsistent-0.19.3.dist-info}/RECORD (56%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{httplib2-0.19.1.dist-info => pyrsistent-0.19.3.dist-info}/WHEEL (65%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pyrsistent-0.18.1.dist-info => pyrsistent-0.19.3.dist-info}/top_level.txt (77%)
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_checked_types.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_field_common.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_helpers.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_immutable.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pbag.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pclass.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pdeque.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_plist.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pmap.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_precord.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pset.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pvector.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_toolz.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_transformations.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/typing.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/RECORD
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/WHEEL
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pyparsing-2.4.7.dist-info => requests-2.28.0.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{requests-2.26.0.dist-info => requests-2.28.0.dist-info}/LICENSE (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{requests-2.26.0.dist-info => requests-2.28.0.dist-info}/METADATA (71%)
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/RECORD
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/REQUESTED
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{future-0.18.2.dist-info => requests-2.28.0.dist-info}/WHEEL (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{requests-2.26.0.dist-info => requests-2.28.0.dist-info}/top_level.txt (100%)
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__version__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/_internal_utils.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/adapters.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/api.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/auth.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/certs.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/compat.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/cookies.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/exceptions.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/help.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/hooks.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/models.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/packages.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/sessions.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/status_codes.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/structures.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/utils.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/METADATA
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/RECORD
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/entry_points.txt
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/top_level.txt
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_deprecation_warning.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_collections.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_msvccompiler.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/archive_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/bcppcompiler.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/ccompiler.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cmd.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_dumb.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_msi.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_rpm.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_wininst.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_clib.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_ext.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_py.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_scripts.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/check.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/clean.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/config.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_data.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_egg_info.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_headers.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_lib.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_scripts.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/py37compat.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/register.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/sdist.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/upload.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/config.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/core.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cygwinccompiler.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/debug.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dep_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dir_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dist.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/errors.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/extension.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/fancy_getopt.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/file_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/filelist.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/log.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvc9compiler.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvccompiler.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py35compat.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py38compat.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/spawn.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/sysconfig.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/text_file.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/unixccompiler.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/version.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/versionpredicate.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_imp.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/more.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/recipes.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/ordered_set.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__about__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_manylinux.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_musllinux.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_structures.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/markers.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/requirements.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/specifiers.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/tags.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/utils.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/version.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/pyparsing.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/archive_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/build_meta.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-32.exe
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-64.exe
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-arm64.exe
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli.exe
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/alias.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_egg.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_rpm.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_clib.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_ext.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_py.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/develop.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/dist_info.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/easy_install.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/egg_info.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_egg_info.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_lib.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_scripts.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/launcher manifest.xml
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/py36compat.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/register.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/rotate.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/saveopts.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/sdist.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/setopt.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/test.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload_docs.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/config.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dep_util.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/depends.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dist.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/errors.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extension.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extern/__init__.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/glob.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-32.exe
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-64.exe
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-arm64.exe
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui.exe
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/installer.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/launch.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/logging.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/monkey.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/msvc.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/namespaces.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/package_index.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/py34compat.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/sandbox.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script (dev).tmpl
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script.tmpl
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/unicode_utils.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/version.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/wheel.py
delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/windows_support.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/six.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/socks.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sockshandler.py
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pyrsistent-0.18.1.dist-info => solnlib-4.7.0.dist-info}/INSTALLER (100%)
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/LICENSE
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{solnlib-4.4.1.dist-info => solnlib-4.7.0.dist-info}/METADATA (88%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{solnlib-4.4.1.dist-info => solnlib-4.7.0.dist-info}/RECORD (58%)
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/REQUESTED
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/WHEEL
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/_utils.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/acl.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/conf_manager.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/credentials.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/file_monitor.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/hec_config.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/log.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/checkpointer.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/event.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/event_writer.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/modular_input.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/net_utils.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/orphan_process_monitor.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/pattern.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/server_info.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunk_rest_client.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunkenv.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/time_parser.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/timer_queue.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/user_access.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/utils.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sorteddict.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sortedlist.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sortedset.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_aoblib/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_aoblib/utility.py
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{requests-2.26.0.dist-info => splunk_sdk-1.6.18.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunk_sdk-1.6.16.dist-info => splunk_sdk-1.6.18.dist-info}/METADATA (91%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunk_sdk-1.6.16.dist-info => splunk_sdk-1.6.18.dist-info}/RECORD (64%)
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/REQUESTED
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/WHEEL
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunk_sdk-1.6.16.dist-info => splunk_sdk-1.6.18.dist-info}/top_level.txt (100%)
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/binding.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/client.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/data.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/argument.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event_writer.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/input_definition.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/scheme.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/script.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/utils.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/validation_definition.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/ordereddict.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/results.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/decorators.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/environment.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/eventing_command.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/external_search_command.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/generating_command.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/internals.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/reporting_command.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/search_command.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/streaming_command.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/validators.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/six.py
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{setuptools-60.7.1.dist-info => splunktalib-3.0.0.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktalib-2.2.6.dist-info => splunktalib-3.0.0.dist-info}/LICENSE (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktalib-2.2.6.dist-info => splunktalib-3.0.0.dist-info}/METADATA (92%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktalib-2.2.6.dist-info => splunktalib-3.0.0.dist-info}/RECORD (70%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{solnlib-4.4.1.dist-info => splunktalib-3.0.0.dist-info}/WHEEL (100%)
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/consts.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/log.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/pattern.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/util.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/xml_dom_parser.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/concurrent_executor.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/process_pool.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/thread_pool.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/conf_endpoints.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/conf_manager.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/data_input_endpoints.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/property_endpoints.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/request.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/ta_conf_manager.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/credentials.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/event_writer.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/file_monitor.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/kv_client.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/modinput.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/orphan_process_monitor.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/rest.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/job.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/scheduler.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_cluster.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_platform.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/state_store.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/timer.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/timer_queue.py
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{solnlib-4.4.1.dist-info => splunktaucclib-6.0.0.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktaucclib-5.0.7.dist-info => splunktaucclib-6.0.0.dist-info}/LICENSE (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktaucclib-5.0.7.dist-info => splunktaucclib-6.0.0.dist-info}/METADATA (79%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktaucclib-5.0.7.dist-info => splunktaucclib-6.0.0.dist-info}/RECORD (79%)
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/REQUESTED
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktalib-2.2.6.dist-info => splunktaucclib-6.0.0.dist-info}/WHEEL (100%)
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/alert_actions_base.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/cim_actions.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/common/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/common/log.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/config.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_checkpoint_manager.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_config.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_consts.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_client.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_collector.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_loader.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_helper.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_mod_input.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/configuration.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/schema.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/modinput_wrapper/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/modinput_wrapper/base_modinput.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/admin_external.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base_hook_mixin.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/cred_mgmt.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/credentials.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/datainput.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/eai.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/converter.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/field.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/validator.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/entity.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/error.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/error_ctl.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/handler.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/multimodel.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/normaliser.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/poster.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/schema.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/teardown.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/util.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/validator.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/rest_helper.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/rest_migration.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/setup_util.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/utility.py
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/INSTALLER
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/METADATA
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/RECORD
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunk_sdk-1.6.16.dist-info => typing_extensions-4.5.0.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{typing_extensions-4.0.1.dist-info => typing_extensions-4.5.0.dist-info}/LICENSE (94%)
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/METADATA
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/RECORD
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktaucclib-5.0.7.dist-info => typing_extensions-4.5.0.dist-info}/WHEEL (71%)
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions.py
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktalib-2.2.6.dist-info => urllib3-1.26.15.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{urllib3-1.26.8.dist-info => urllib3-1.26.15.dist-info}/LICENSE.txt (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{urllib3-1.26.8.dist-info => urllib3-1.26.15.dist-info}/METADATA (93%)
create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/RECORD
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{attrs-21.4.0.dist-info => urllib3-1.26.15.dist-info}/WHEEL (70%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{urllib3-1.26.8.dist-info => urllib3-1.26.15.dist-info}/top_level.txt (100%)
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/INSTALLER
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/RECORD
delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/WHEEL
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_collections.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_version.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connection.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connectionpool.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_appengine_environ.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/bindings.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/low_level.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/appengine.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/ntlmpool.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/pyopenssl.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/securetransport.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/socks.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/exceptions.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/fields.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/filepost.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/backports/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/backports/makefile.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/six.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/poolmanager.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/request.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/response.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/__init__.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/connection.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/proxy.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/queue.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/request.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/response.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/retry.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_match_hostname.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssltransport.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/timeout.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/url.py
mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/wait.py
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{splunktaucclib-5.0.7.dist-info => zipp-3.15.0.dist-info}/INSTALLER (100%)
rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{setuptools-60.7.1.dist-info =>
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{setuptools-60.7.1.dist-info => zipp-3.15.0.dist-info}/LICENSE (100%)
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/METADATA
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/RECORD
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/WHEEL
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{zipp-3.7.0.dist-info => zipp-3.15.0.dist-info}/top_level.txt (100%)
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/INSTALLER
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/LICENSE
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/METADATA
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/RECORD
 delete mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/WHEEL
 delete mode 100755 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp.py
 rename Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/{pkg_resources/_vendor/zipp.py => zipp/__init__.py} (71%)
 mode change 100755 => 100644
 create mode 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp/py310compat.py
 mode change 100755 => 100644 Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter_declare.py

diff --git a/Splunk-TA-cisco-dnacenter/app.manifest b/Splunk-TA-cisco-dnacenter/app.manifest
index d4c1bb5..80c260d 100644
--- a/Splunk-TA-cisco-dnacenter/app.manifest
+++ b/Splunk-TA-cisco-dnacenter/app.manifest
@@ -5,7 +5,7 @@
     "id": {
       "group": null,
       "name": "Splunk-TA-cisco-dnacenter",
-      "version": "1.0.0"
+      "version": "1.0.4"
     },
     "author": [
       {
diff --git a/Splunk-TA-cisco-dnacenter/appserver/static/js/build/globalConfig.json b/Splunk-TA-cisco-dnacenter/appserver/static/js/build/globalConfig.json
index 4e86cc5..9d830ce 100644
--- a/Splunk-TA-cisco-dnacenter/appserver/static/js/build/globalConfig.json
+++ b/Splunk-TA-cisco-dnacenter/appserver/static/js/build/globalConfig.json
@@ -143,6 +143,10 @@
                 {
                     "field": "index",
                     "label": "Index"
+                },
+                {
+                    "field": "disabled",
+                    "label": "Status"
                 }
             ],
             "moreInfo": [
@@ -158,6 +162,10 @@
                     "field": "index",
                     "label": "Index"
                 },
+                {
+                    "field": "disabled",
+                    "label": "Status"
+                },
                 {
                     "field": "cisco_dna_center_host",
                     "label": "Cisco DNA Center Host"
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_account.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_account.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_clienthealth.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_clienthealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_compliance.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_compliance.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_devicehealth.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_devicehealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_issue.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_issue.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_networkhealth.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_networkhealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_securityadvisory.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_securityadvisory.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_settings.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_settings.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_clienthealth.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_clienthealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_compliance.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_compliance.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_devicehealth.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_devicehealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_issue.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_issue.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_networkhealth.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_networkhealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_securityadvisory.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_securityadvisory.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_clienthealth.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_clienthealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_compliance.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_compliance.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_devicehealth.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_devicehealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_issue.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_issue.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_networkhealth.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_networkhealth.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_securityadvisory.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_securityadvisory.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/AUTHORS b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/AUTHORS
deleted file mode 100644
index 81d16dc..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/AUTHORS
+++ /dev/null
@@ -1,13 +0,0 @@
-Mako was created by Michael Bayer.
-
-Major contributing authors include:
-
-- Michael Bayer
-- Geoffrey T. Dairiki
-- Philip Jenvey
-- David Peckam
-- Armin Ronacher
-- Ben Bangert
-- Ben Trofatter
-
-
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/RECORD
deleted file mode 100644
index 459e0e4..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/RECORD
+++ /dev/null
@@ -1,36 +0,0 @@
-../../bin/mako-render,sha256=Abuf-Q4pfuZRPuU3bFhVoP58TnIFa4hsg1lDpMVEqIk,213
-Mako-1.1.0.dist-info/AUTHORS,sha256=Io2Vw70mjYS7yFcUuJxhIGiMUQt8FWJuxiiwyUW1WRg,282
-Mako-1.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-Mako-1.1.0.dist-info/LICENSE,sha256=w7EkZzyThyFWdXqTTHW0mNaO7AlRDiXlSdncIBN3ZJk,1097
-Mako-1.1.0.dist-info/METADATA,sha256=rYwZpxohFsfbyCtc2EkFdkX3AwsNYtcbej_gSo1LM5E,2517
-Mako-1.1.0.dist-info/RECORD,,
-Mako-1.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Mako-1.1.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-Mako-1.1.0.dist-info/entry_points.txt,sha256=pRPyt8V0Ss7h84ZQVCX9MkK0Xo5MClQWlQ67pJ4bGdU,586
-Mako-1.1.0.dist-info/top_level.txt,sha256=LItdH8cDPetpUu8rUyBG3DObS6h9Gcpr9j_WLj2S-R0,5
-mako/__init__.py,sha256=_aGTYpZhxW3tTjfMiZRDMe7a5t83KyFC8SBrakW187M,242
-mako/_ast_util.py,sha256=kCfMGlNJz3ooAOh70SmPjgcEeodfyTHOIYGKQTJq0dQ,20414
-mako/ast.py,sha256=U3eOguvzWZJmEHqosCs9E7OdCzozF96GHr2kD4ryjdw,6789
-mako/cache.py,sha256=N1RtINCiEAYPWA45PEnePMqU8ZMiFsWdcbd703sbm9I,7736
-mako/cmd.py,sha256=KTeCI4vqL2pV2jlRjV2lgZrOlmbDqj_xPUzzRR8pMJ4,2471
-mako/codegen.py,sha256=P0lAlTRKI7MRVoGor4T2Mc3mETldJvZw03pDNQ5Ms_c,47892
-mako/compat.py,sha256=Kgaru5DR4TU51iOh797RmdGVjuNOxu2-G0ZnuPDCkQI,3848
-mako/exceptions.py,sha256=aNqpkbKDVP_C9k1j7OgM7UrtFZhriDR82uRfj_7iyjk,13158
-mako/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mako/ext/autohandler.py,sha256=9U1iazp-tQrODVOUDqgsw5-ctVjRKy7vDVieOuLjO54,1885
-mako/ext/babelplugin.py,sha256=CGu4LFEPFg83AATTgqOBOC-Ce3U_597ERb6MQYwkLs8,2138
-mako/ext/beaker_cache.py,sha256=anK7g8cjmDLHTJ5o8aDS3LrzudeBvrEMe0eUx2p1nSs,2599
-mako/ext/extract.py,sha256=PSgnjKSVRBnSm0mQj7hReqICpwb0oR7Gn_S9Xl3kaN4,4616
-mako/ext/linguaplugin.py,sha256=uct4HiA9Mc-vwRTxM3mmrtN3pEVsERft01_xp_Go8as,1954
-mako/ext/preprocessors.py,sha256=CpTghGtGi0hTpg9mgbyJQ0LzL23OKUp3nPe2Jz-BUwM,576
-mako/ext/pygmentplugin.py,sha256=JOGLEGKZiBRZ1TQrjmqpSJEprr7PHYLpTAA_TBDCAsM,4951
-mako/ext/turbogears.py,sha256=SnaqDOA03mwExUmmtSsBKTKpTdZqbxCjsONUWTgj4ds,2165
-mako/filters.py,sha256=gsNMSq2KSZfAiCaqVeojrUevFxCrI5uZOKnyLkRGMcU,6063
-mako/lexer.py,sha256=lPjsDAuam4Sc8VmIHumMVHV8azmFpIRISyWOiunblVs,16926
-mako/lookup.py,sha256=Yl4W3OMAh875v7YMQnLmHHlf-TKHm99-gBQVtuIb3Pc,12718
-mako/parsetree.py,sha256=HvZxqgYEtY3BOIagiSIYyysEPmNRRmAUzq3JeqwgtQ4,19411
-mako/pygen.py,sha256=edYGqAn2XcBPkwgfoALRziLEcIG231szwAGwmSu-9Q8,10073
-mako/pyparser.py,sha256=moglp-107EIdIA6bMVt6oHvZmPbERJV8ieIyhUSZ6vM,7789
-mako/runtime.py,sha256=y38I58jUR5kCB1DxOzXqvneuAnkxc8xifh21vli73EY,28040
-mako/template.py,sha256=wfRlvGu3IaRZ2WfZjy6Zf6c2qksvAVwDpTggKTVIpic,26530
-mako/util.py,sha256=wIeZpUByHPBnIAXgMXvEpyTY7uJAGJ5dZmXOnpKVRuY,11015
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/entry_points.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/entry_points.txt
deleted file mode 100644
index 3b15006..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/entry_points.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-
- [python.templating.engines]
- mako = mako.ext.turbogears:TGPlugin
-
- [pygments.lexers]
- mako = mako.ext.pygmentplugin:MakoLexer
- html+mako = mako.ext.pygmentplugin:MakoHtmlLexer
- xml+mako = mako.ext.pygmentplugin:MakoXmlLexer
- js+mako = mako.ext.pygmentplugin:MakoJavascriptLexer
- css+mako = mako.ext.pygmentplugin:MakoCssLexer
-
- [babel.extractors]
- mako = mako.ext.babelplugin:extract
-
- [lingua.extractors]
- mako = mako.ext.linguaplugin:LinguaMakoExtractor
-
- [console_scripts]
- mako-render = mako.cmd:cmdline
- 
\ No newline at end of file
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/INSTALLER
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/INSTALLER
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/INSTALLER
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/LICENSE
similarity index 93%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/LICENSE
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/LICENSE
index ec32acf..be84a38 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/LICENSE
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/LICENSE
@@ -1,4 +1,4 @@
-Copyright 2006-2019 the Mako authors and contributors <see AUTHORS file>.
+Copyright 2006-2022 the Mako authors and contributors <see AUTHORS file>.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal in
@@ -16,4 +16,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
+SOFTWARE.
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/METADATA
similarity index 78%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/METADATA
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/METADATA
index 5328169..33a7580 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/METADATA
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/METADATA
@@ -1,28 +1,37 @@
 Metadata-Version: 2.1
 Name: Mako
-Version: 1.1.0
-Summary: A super-fast templating language that borrows the best ideas from the existing templating languages.
+Version: 1.2.4
+Summary: A super-fast templating language that borrows the best ideas from the existing templating languages.
 Home-page: https://www.makotemplates.org/
 Author: Mike Bayer
 Author-email: mike@zzzcomputing.com
 License: MIT
 Project-URL: Documentation, https://docs.makotemplates.org
 Project-URL: Issue Tracker, https://github.com/sqlalchemy/mako
-Keywords: templates
-Platform: UNKNOWN
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Environment :: Web Environment
 Classifier: Intended Audience :: Developers
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
 License-File: LICENSE
-License-File: AUTHORS
 Requires-Dist: MarkupSafe (>=0.9.2)
+Requires-Dist: importlib-metadata ; python_version < "3.8"
+Provides-Extra: babel
+Requires-Dist: Babel ; extra == 'babel'
+Provides-Extra: lingua
+Requires-Dist: lingua ; extra == 'lingua'
+Provides-Extra: testing
+Requires-Dist: pytest ; extra == 'testing'
 
 =========================
 Mako Templates for Python
 =========================
@@ -76,5 +85,3 @@ License
 Mako is licensed under an MIT-style license (see LICENSE). Other incorporated
 projects may be licensed under different licenses. All licenses allow for
 non-commercial and commercial use.
-
-
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/RECORD
new file mode 100644
index 0000000..89c5b59
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/RECORD
@@ -0,0 +1,42 @@
+../../bin/mako-render,sha256=Abuf-Q4pfuZRPuU3bFhVoP58TnIFa4hsg1lDpMVEqIk,213
+Mako-1.2.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Mako-1.2.4.dist-info/LICENSE,sha256=dg8is-nqSlDrmSAb2N0RiGnygQjPtkzM5tGzBc-a6fo,1098
+Mako-1.2.4.dist-info/METADATA,sha256=MlPkZcQ5bASEMtzkRaH8aRSQE6gmLH3KTnASUawz6eA,2909
+Mako-1.2.4.dist-info/RECORD,,
+Mako-1.2.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Mako-1.2.4.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+Mako-1.2.4.dist-info/entry_points.txt,sha256=LsKkUsOsJQYbJ2M72hZCm968wi5K8Ywb5uFxCuN8Obk,512
+Mako-1.2.4.dist-info/top_level.txt,sha256=LItdH8cDPetpUu8rUyBG3DObS6h9Gcpr9j_WLj2S-R0,5
+mako/__init__.py,sha256=R1cQoVGhYA-fl43kNSPKm6kzdJOs28e8sq8WYMHctMQ,242
+mako/_ast_util.py,sha256=BcwJLuE4E-aiFXi_fanO378Cn3Ou03bJxc6Incjse4Y,20247
+mako/ast.py,sha256=h07xBpz2l19RSwpejrhkhgB4r5efpwGmsYOy_L8xvUc,6642
+mako/cache.py,sha256=jkspun9tLgu0IVKSmo_fkL_DAbSTl2P5a5zkMBkjZvk,7680
+mako/cmd.py,sha256=vQg9ip89KMsuZEGamCRAPg7UyDNlpMmnG3XHDNLHS5o,2814
+mako/codegen.py,sha256=h1z8DGLkB92nbUz2OZGVmUKqPr9kVNbnNL8KnLizYAk,47309
+mako/compat.py,sha256=Sa3Rzrjl44xo25nXUHbhfIrEoMgceq5-Ohl0FO6cCHk,1913
+mako/exceptions.py,sha256=xQZKYdb-4d8rcrNFsFzjGSEuNG4upFqGNPErtSCDqfI,12530
+mako/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mako/ext/autohandler.py,sha256=-hNv4VHbQplLGDt5e4mFsBC-QpfWMjKokOe0axDP308,1885
+mako/ext/babelplugin.py,sha256=s6ZIAh1hUhsJIiF3j4soVHrFN_1cRJ_e3sEbz7ein7k,2091
+mako/ext/beaker_cache.py,sha256=D6gh_ke7QOKiSJtq9v67RvmqCRMDJx-IwTcd-NDjKvk,2578
+mako/ext/extract.py,sha256=EhXglj2eW5u80T3xWWB7jMgL8oNDfAQaD5E5IRiL9N0,4659
+mako/ext/linguaplugin.py,sha256=iLip2gZ0ya5pooHrxwZrP8VFQfJidXmgPZ5h1j30Kow,1935
+mako/ext/preprocessors.py,sha256=pEUbmfSO2zb4DuCt_-_oYnWypWiXs4MnJHxjTMiks5A,576
+mako/ext/pygmentplugin.py,sha256=GuOd93TjetzpTfW5oUEtuPS7jKDHgJIH3Faiaq76S0c,4753
+mako/ext/turbogears.py,sha256=mxFDF59NFK6cm__3qwGjZ1VAW0qdjJWNj23l6dcwqEg,2141
+mako/filters.py,sha256=rlHJ2L5RFr5Gf-MyOJKZI7TSJpM5oBXH58niJWCp2-4,4658
+mako/lexer.py,sha256=GOHNLeSlTIEa_yV8W5Qr27SjaPlJcO0Kij7Z2rpUkCA,15982
+mako/lookup.py,sha256=_2VPSA2CgCiT0Vd9GnSIjyY5wlpXiB2C5luXJP7gym8,12429
+mako/parsetree.py,sha256=pXbZP0orsT3iBIgWa9yD1TEfvytsCaXu2Ttws8RTMGM,19007
+mako/pygen.py,sha256=K-l_hsvXfWdMTunfHyVxvA5EG4Uzr4Qaw6IUc3hw8zI,10416
+mako/pyparser.py,sha256=diSXgo-ZwdZxbRsNZ1DmARQKVnlOFc6Qgx9Dc3wZB_U,7032
+mako/runtime.py,sha256=MwO5T1rGy0yLeJiFh2hh5cO_kfd5_9fJq_vfBzLFe_0,27806
+mako/template.py,sha256=gEhMPjKZ1Q_sYWWg6PLnRX-KBeTF0kBnyRZimlmgQks,23858
+mako/testing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mako/testing/_config.py,sha256=k-qpnsnbXUoN-ykMN5BRpg84i1x0p6UsAddKQnrIytU,3566
+mako/testing/assertions.py,sha256=XnYDPSnDFiEX9eO95OZ5LndZrUpJ6_xGofe6qDzJxqU,5162
+mako/testing/config.py,sha256=wmYVZfzGvOK3mJUZpzmgO8-iIgvaCH41Woi4yDpxq6E,323
+mako/testing/exclusions.py,sha256=_t6ADKdatk3f18tOfHV_ZY6u_ZwQsKphZ2MXJVSAOcI,1553
+mako/testing/fixtures.py,sha256=nEp7wTusf7E0n3Q-BHJW2s_t1vx0KB9poadQ1BmIJzE,3044
+mako/testing/helpers.py,sha256=kTaIg8OL1uvcuLptbRA_aJtGndIDDaxAzacYbv_Km1Q,1521
+mako/util.py,sha256=XmYQmq6WfMAt-BPM7zhT9lybEqHVIWCM9wF1ukzqpew,10638
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/REQUESTED
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/REQUESTED
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/REQUESTED
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/WHEEL
similarity index 65%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/WHEEL
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/WHEEL
index becc9a6..57e3d84 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/WHEEL
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.37.1)
+Generator: bdist_wheel (0.38.4)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/entry_points.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/entry_points.txt
new file mode 100644
index 0000000..30f31b2
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/entry_points.txt
@@ -0,0 +1,18 @@
+[babel.extractors]
+mako = mako.ext.babelplugin:extract [babel]
+
+[console_scripts]
+mako-render = mako.cmd:cmdline
+
+[lingua.extractors]
+mako = mako.ext.linguaplugin:LinguaMakoExtractor [lingua]
+
+[pygments.lexers]
+css+mako = mako.ext.pygmentplugin:MakoCssLexer
+html+mako = mako.ext.pygmentplugin:MakoHtmlLexer
+js+mako = mako.ext.pygmentplugin:MakoJavascriptLexer
+mako = mako.ext.pygmentplugin:MakoLexer
+xml+mako = mako.ext.pygmentplugin:MakoXmlLexer
+
+[python.templating.engines]
+mako = mako.ext.turbogears:TGPlugin
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/top_level.txt
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/top_level.txt
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.2.4.dist-info/top_level.txt
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/MarkupSafe-2.0.1.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/MarkupSafe-2.0.1.dist-info/RECORD
index 8fe0b5c..ab618d3 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/MarkupSafe-2.0.1.dist-info/RECORD
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/MarkupSafe-2.0.1.dist-info/RECORD
@@ -2,6 +2,7 @@ MarkupSafe-2.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7ze
 MarkupSafe-2.0.1.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
 MarkupSafe-2.0.1.dist-info/METADATA,sha256=lknelt-VPHWai5EJcvZpATGKVbXkg74h7CQuPwDS71U,3237
 MarkupSafe-2.0.1.dist-info/RECORD,,
+MarkupSafe-2.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 MarkupSafe-2.0.1.dist-info/WHEEL,sha256=T7Cp5xu87yB0VfKahSR3N0JT_FVycX4pq6-fNwtW39g,221
 MarkupSafe-2.0.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
 markupsafe/__init__.py,sha256=9Tez4UIlI7J6_sQcUFK1dKniT_b_8YefpGIyYJ3Sr2Q,8923
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/MarkupSafe-2.0.1.dist-info/REQUESTED
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/REQUESTED
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/MarkupSafe-2.0.1.dist-info/REQUESTED
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/__init__.py
deleted file mode 100755
index 1f8daf4..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/__init__.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# don't import any costly modules
-import sys
-import os
-
-
-is_pypy = '__pypy__' in sys.builtin_module_names
-
-
-def warn_distutils_present():
-    if 'distutils' not in sys.modules:
-        return
-    if is_pypy and sys.version_info < (3, 7):
-        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
-        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
-        return
-    import warnings
-    warnings.warn(
-        "Distutils was imported before Setuptools, but importing Setuptools "
-        "also replaces the `distutils` module in `sys.modules`. This may lead "
-        "to undesirable behaviors or errors. To avoid these issues, avoid "
-        "using distutils directly, ensure that setuptools is installed in the "
-        "traditional way (e.g. not an editable install), and/or make sure "
-        "that setuptools is always imported before distutils.")
-
-
-def clear_distutils():
-    if 'distutils' not in sys.modules:
-        return
-    import warnings
-    warnings.warn("Setuptools is replacing distutils.")
-    mods = [
-        name for name in sys.modules
-        if name == "distutils" or name.startswith("distutils.")
-    ]
-    for name in mods:
-        del sys.modules[name]
-
-
-def enabled():
-    """
-    Allow selection of distutils by environment variable.
-    """
-    which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
-    return which == 'local'
-
-
-def ensure_local_distutils():
-    import importlib
-    clear_distutils()
-
-    # With the DistutilsMetaFinder in place,
-    # perform an import to cause distutils to be
-    # loaded from setuptools._distutils. Ref #2906.
-    with shim():
-        importlib.import_module('distutils')
-
-    # check that submodules load as expected
-    core = importlib.import_module('distutils.core')
-    assert '_distutils' in core.__file__, core.__file__
-
-
-def do_override():
-    """
-    Ensure that the local copy of distutils is preferred over stdlib.
-
-    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
-    for more motivation.
-    """
-    if enabled():
-        warn_distutils_present()
-        ensure_local_distutils()
-
-
-class _TrivialRe:
-    def __init__(self, *patterns):
-        self._patterns = patterns
-
-    def match(self, string):
-        return all(pat in string for pat in self._patterns)
-
-
-class DistutilsMetaFinder:
-    def find_spec(self, fullname, path, target=None):
-        if path is not None:
-            return
-
-        method_name = 'spec_for_{fullname}'.format(**locals())
-        method = getattr(self, method_name, lambda: None)
-        return method()
-
-    def spec_for_distutils(self):
-        if self.is_cpython():
-            return
-
-        import importlib
-        import importlib.abc
-        import importlib.util
-
-        try:
-            mod = importlib.import_module('setuptools._distutils')
-        except Exception:
-            # There are a couple of cases where setuptools._distutils
-            # may not be present:
-            # - An older Setuptools without a local distutils is
-            #   taking precedence. Ref #2957.
-            # - Path manipulation during sitecustomize removes
-            #   setuptools from the path but only after the hook
-            #   has been loaded. Ref #2980.
-            # In either case, fall back to stdlib behavior.
-            return
-
-        class DistutilsLoader(importlib.abc.Loader):
-
-            def create_module(self, spec):
-                return mod
-
-            def exec_module(self, module):
-                pass
-
-        return importlib.util.spec_from_loader(
-            'distutils', DistutilsLoader(), origin=mod.__file__
-        )
-
-    @staticmethod
-    def is_cpython():
-        """
-        Suppress supplying distutils for CPython (build and tests).
-        Ref #2965 and #3007.
-        """
-        return os.path.isfile('pybuilddir.txt')
-
-    def spec_for_pip(self):
-        """
-        Ensure stdlib distutils when running under pip.
-        See pypa/pip#8761 for rationale.
-        """
-        if self.pip_imported_during_build():
-            return
-        clear_distutils()
-        self.spec_for_distutils = lambda: None
-
-    def spec_for_setuptools(self):
-        """
-        get-pip imports setuptools solely for the purpose of
-        determining if it's installed. In this case, provide
-        a stubbed spec to represent setuptools being present
-        without invoking any behavior.
-
-        Workaround for pypa/get-pip#137. Ref #2993.
-        """
-        if not self.is_script('get-pip'):
-            return
-
-        import importlib
-
-        class StubbedLoader(importlib.abc.Loader):
-
-            def create_module(self, spec):
-                import types
-                return types.ModuleType('setuptools')
-
-            def exec_module(self, module):
-                pass
-
-        return importlib.util.spec_from_loader(
-            'setuptools', StubbedLoader(),
-        )
-
-    @classmethod
-    def pip_imported_during_build(cls):
-        """
-        Detect if pip is being imported in a build script. Ref #2355.
-        """
-        import traceback
-        return any(
-            cls.frame_file_is_setup(frame)
-            for frame, line in traceback.walk_stack(None)
-        )
-
-    @staticmethod
-    def is_script(name):
-        try:
-            import __main__
-            return os.path.basename(__main__.__file__) == f'{name}.py'
-        except AttributeError:
-            pass
-
-    @staticmethod
-    def frame_file_is_setup(frame):
-        """
-        Return True if the indicated frame suggests a setup.py file.
-        """
-        # some frames may not have __file__ (#2940)
-        return frame.f_globals.get('__file__', '').endswith('setup.py')
-
-
-DISTUTILS_FINDER = DistutilsMetaFinder()
-
-
-def add_shim():
-    DISTUTILS_FINDER in sys.meta_path or insert_shim()
-
-
-class shim:
-    def __enter__(self):
-        insert_shim()
-
-    def __exit__(self, exc, value, tb):
-        remove_shim()
-
-
-def insert_shim():
-    sys.meta_path.insert(0, DISTUTILS_FINDER)
-
-
-def remove_shim():
-    try:
-        sys.meta_path.remove(DISTUTILS_FINDER)
-    except ValueError:
-        pass
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/override.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/override.py
deleted file mode 100755
index 2cc433a..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_distutils_hack/override.py
+++ /dev/null
@@ -1 +0,0 @@
-__import__('_distutils_hack').do_override()
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_pyrsistent_version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_pyrsistent_version.py
old mode 100755
new mode 100644
index 5877c8d..b699138
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_pyrsistent_version.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/_pyrsistent_version.py
@@ -1 +1 @@
-__version__ = '0.18.1'
+__version__ = '0.19.3'
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.py
old mode 100755
new mode 100644
index f95c96d..7cfa792
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.py
@@ -1,10 +1,11 @@
 # SPDX-License-Identifier: MIT
 
-from __future__ import absolute_import, division, print_function
-
-import sys
+"""
+Classes Without Boilerplate
+"""
 
 from functools import partial
+from typing import Callable
 
 from . import converters, exceptions, filters, setters, validators
 from ._cmp import cmp_using
@@ -21,31 +22,22 @@
     make_class,
     validate,
 )
+from ._next_gen import define, field, frozen, mutable
 from ._version_info import VersionInfo
 
-
-__version__ = "21.4.0"
-__version_info__ = VersionInfo._from_version_string(__version__)
-
-__title__ = "attrs"
-__description__ = "Classes Without Boilerplate"
-__url__ = "https://www.attrs.org/"
-__uri__ = __url__
-__doc__ = __description__ + " <" + __uri__ + ">"
-
-__author__ = "Hynek Schlawack"
-__email__ = "hs@ox.cx"
-
-__license__ = "MIT"
-__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
-
-
 s = attributes = attrs
 ib = attr = attrib
 dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
 
+
+class AttrsInstance:
+    pass
+
+
 __all__ = [
     "Attribute",
+    "AttrsInstance",
     "Factory",
@@ -57,15 +49,19 @@
     "attrs",
     "cmp_using",
     "converters",
+    "define",
     "evolve",
     "exceptions",
+    "field",
     "fields",
     "fields_dict",
     "filters",
+    "frozen",
     "get_run_validators",
     "has",
     "ib",
     "make_class",
+    "mutable",
     "resolve_types",
     "s",
     "set_run_validators",
@@ -74,7 +70,63 @@
     "validators",
 ]
 
-if sys.version_info[:2] >= (3, 6):
-    from ._next_gen import define, field, frozen, mutable  # noqa: F401
 
-    __all__.extend(("define", "field", "frozen", "mutable"))
+def _make_getattr(mod_name: str) -> Callable:
+    """
+    Create a metadata proxy for packaging information that uses *mod_name* in
+    its warnings and errors.
+    """
+
+    def __getattr__(name: str) -> str:
+        dunder_to_metadata = {
+            "__title__": "Name",
+            "__copyright__": "",
+            "__version__": "version",
+            "__version_info__": "version",
+            "__description__": "summary",
+            "__uri__": "",
+            "__url__": "",
+            "__author__": "",
+            "__email__": "",
+            "__license__": "license",
+        }
+        if name not in dunder_to_metadata.keys():
+            raise AttributeError(f"module {mod_name} has no attribute {name}")
+
+        import sys
+        import warnings
+
+        if sys.version_info < (3, 8):
+            from importlib_metadata import metadata
+        else:
+            from importlib.metadata import metadata
+
+        if name != "__version_info__":
+            warnings.warn(
+                f"Accessing {mod_name}.{name} is deprecated and will be "
+                "removed in a future release. Use importlib.metadata directly "
+                "to query for attrs's packaging metadata.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        meta = metadata("attrs")
+        if name == "__license__":
+            return "MIT"
+        elif name == "__copyright__":
+            return "Copyright (c) 2015 Hynek Schlawack"
+        elif name in ("__uri__", "__url__"):
+            return meta["Project-URL"].split(" ", 1)[-1]
+        elif name == "__version_info__":
+            return VersionInfo._from_version_string(meta["version"])
+        elif name == "__author__":
+            return meta["Author-email"].rsplit(" ", 1)[0]
+        elif name == "__email__":
+            return meta["Author-email"].rsplit("<", 1)[1][:-1]
+
+        return meta[dunder_to_metadata[name]]
+
+    return __getattr__
+
+
+__getattr__ = _make_getattr(__name__)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.pyi
index c0a2126..ced5a3f 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.pyi
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/__init__.pyi
@@ -1,3 +1,4 @@
+import enum
 import sys
 
 from typing import (
@@ -8,6 +9,7 @@ from typing import (
     List,
     Mapping,
     Optional,
+    Protocol,
     Sequence,
     Tuple,
     Type,
@@ -22,8 +24,15 @@ from . import exceptions as exceptions
 from . import filters as filters
 from . import setters as setters
 from . import validators as validators
+from ._cmp import cmp_using as cmp_using
+from ._typing_compat import AttrsInstance_
 from ._version_info import VersionInfo
 
+if sys.version_info >= (3, 10):
+    from typing import TypeGuard
+else:
+    from typing_extensions import TypeGuard
+
 __version__: str
 __version_info__: VersionInfo
 __title__: str
@@ -39,27 +48,34 @@ _T = TypeVar("_T")
 _C = TypeVar("_C", bound=type)
 
 _EqOrderType = Union[bool, Callable[[Any], Any]]
-_ValidatorType = Callable[[Any, Attribute[_T], _T], Any]
+_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any]
 _ConverterType = Callable[[Any], Any]
-_FilterType = Callable[[Attribute[_T], _T], bool]
+_FilterType = Callable[["Attribute[_T]", _T], bool]
 _ReprType = Callable[[Any], str]
 _ReprArgType = Union[bool, _ReprType]
-_OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any]
+_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any]
 _OnSetAttrArgType = Union[
     _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType
 ]
 _FieldTransformer = Callable[
-    [type, List[Attribute[Any]]], List[Attribute[Any]]
+    [type, List["Attribute[Any]"]], List["Attribute[Any]"]
 ]
-_CompareWithType = Callable[[Any, Any], bool]
 
 # FIXME: in reality, if multiple validators are passed they must be in a list
 # or tuple, but those are invariant and so would prevent subtypes of
 # _ValidatorType from working when passed in a list or tuple.
 _ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]]
 
+# We subclass this here to keep the protocol's qualified name clean.
+class AttrsInstance(AttrsInstance_, Protocol):
+    pass
+
+_A = TypeVar("_A", bound=AttrsInstance)
+
 # _make --
 
-NOTHING: object
+class _Nothing(enum.Enum):
+    NOTHING = enum.auto()
+
+NOTHING = _Nothing.NOTHING
 
 # NOTE: Factory lies about its return type to make this possible:
 # `x: List[int] # = Factory(list)`
@@ -101,6 +117,7 @@ def __dataclass_transform__(
     eq_default: bool = True,
     order_default: bool = False,
     kw_only_default: bool = False,
+    frozen_default: bool = False,
     field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
 ) -> Callable[[_T], _T]: ...
@@ -119,6 +136,8 @@ class Attribute(Generic[_T]):
     type: Optional[Type[_T]]
     kw_only: bool
     on_setattr: _OnSetAttrType
+    alias: Optional[str]
+
     def evolve(self, **changes: Any) -> "Attribute[Any]": ...
 
 # NOTE: We had several choices for the annotation to use for type arg:
@@ -161,6 +180,7 @@ def attrib(
     eq: Optional[_EqOrderType] = ...,
     order: Optional[_EqOrderType] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
 ) -> Any: ...
 
 # This form catches an explicit None or no default and infers the type from the
@@ -181,6 +201,7 @@ def attrib(
     eq: Optional[_EqOrderType] = ...,
     order: Optional[_EqOrderType] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
 ) -> _T: ...
 
 # This form catches an explicit default argument.
@@ -200,6 +221,7 @@ def attrib(
     eq: Optional[_EqOrderType] = ...,
     order: Optional[_EqOrderType] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
 ) -> _T: ...
 
 # This form covers type=non-Type: e.g. forward references (str), Any
@@ -219,6 +241,7 @@ def attrib(
     eq: Optional[_EqOrderType] = ...,
     order: Optional[_EqOrderType] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
 ) -> Any: ...
 @overload
 def field(
@@ -235,6 +258,8 @@ def field(
     eq: Optional[bool] = ...,
     order: Optional[bool] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
+    type: Optional[type] = ...,
 ) -> Any: ...
 
 # This form catches an explicit None or no default and infers the type from the
@@ -254,6 +279,8 @@ def field(
     eq: Optional[_EqOrderType] = ...,
     order: Optional[_EqOrderType] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
+    type: Optional[type] = ...,
 ) -> _T: ...
 
 # This form catches an explicit default argument.
@@ -272,6 +299,8 @@ def field(
     eq: Optional[_EqOrderType] = ...,
     order: Optional[_EqOrderType] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
+    type: Optional[type] = ...,
 ) -> _T: ...
 
 # This form covers type=non-Type: e.g. forward references (str), Any
@@ -290,6 +319,8 @@ def field(
     eq: Optional[_EqOrderType] = ...,
     order: Optional[_EqOrderType] = ...,
     on_setattr: Optional[_OnSetAttrArgType] = ...,
+    alias: Optional[str] = ...,
+    type: Optional[type] = ...,
 ) -> Any: ...
 @overload
 @__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
 def attrs(
@@ -317,6 +348,7 @@ def attrs(
     on_setattr: Optional[_OnSetAttrArgType] = ...,
     field_transformer: Optional[_FieldTransformer] = ...,
     match_args: bool = ...,
+    unsafe_hash: Optional[bool] = ...,
 ) -> _C: ...
 @overload
 @__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
 def attrs(
@@ -344,6 +376,7 @@ def attrs(
     on_setattr: Optional[_OnSetAttrArgType] = ...,
     field_transformer: Optional[_FieldTransformer] = ...,
     match_args: bool = ...,
+    unsafe_hash: Optional[bool] = ...,
 ) -> Callable[[_C], _C]: ...
 @overload
 @__dataclass_transform__(field_descriptors=(attrib, field))
 def define(
@@ -352,6 +385,7 @@ def define(
     *,
     these: Optional[Dict[str, Any]] = ...,
     repr: bool = ...,
+    unsafe_hash: Optional[bool] = ...,
     hash: Optional[bool] = ...,
     init: bool = ...,
     slots: bool = ...,
@@ -377,6 +411,7 @@ def define(
     *,
     these: Optional[Dict[str, Any]] = ...,
     repr: bool = ...,
+    unsafe_hash: Optional[bool] = ...,
     hash: Optional[bool] = ...,
     init: bool = ...,
     slots: bool = ...,
@@ -397,21 +432,73 @@ def define(
 ) -> Callable[[_C], _C]: ...
 
 mutable = define
-frozen = define  # they differ only in their defaults
-
-# TODO: add support for returning NamedTuple from the mypy plugin
-class _Fields(Tuple[Attribute[Any], ...]):
-    def __getattr__(self, name: str) -> Attribute[Any]: ...
-def fields(cls: type) -> _Fields: ...
-def fields_dict(cls: type) -> Dict[str, Attribute[Any]]: ...
-def validate(inst: Any) -> None: ...
+
+@overload
+@__dataclass_transform__(
+    frozen_default=True, field_descriptors=(attrib, field)
+)
+def frozen(
+    maybe_cls: _C,
+    *,
+    these: Optional[Dict[str, Any]] = ...,
+    repr: bool = ...,
+    unsafe_hash: Optional[bool] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    auto_detect: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(
+    frozen_default=True, field_descriptors=(attrib, field)
+)
+def frozen(
+    maybe_cls: None = ...,
+    *,
+    these: Optional[Dict[str, Any]] = ...,
+    repr: bool = ...,
+    unsafe_hash: Optional[bool] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    auto_detect: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+def fields(cls: Type[AttrsInstance]) -> Any: ...
+def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ...
+def validate(inst: AttrsInstance) -> None: ...
 def resolve_types(
-    cls: _C,
+    cls: _A,
     globalns: Optional[Dict[str, Any]] = ...,
     localns: Optional[Dict[str, Any]] = ...,
     attribs: Optional[List[Attribute[Any]]] = ...,
-) -> _C: ...
+    include_extras: bool = ...,
+) -> _A: ...
 
 # TODO: add support for returning a proper attrs class from the mypy plugin
 # we use Any instead of _CountingAttr so that e.g. `make_class('Foo',
@@ -449,7 +536,7 @@ def make_class(
 # https://github.com/python/typing/issues/253
 # XXX: remember to fix attrs.asdict/astuple too!
 def asdict(
-    inst: Any,
+    inst: AttrsInstance,
     recurse: bool = ...,
     filter: Optional[_FilterType[Any]] = ...,
     dict_factory: Type[Mapping[Any, Any]] = ...,
@@ -462,13 +549,13 @@ def asdict(
 
 # TODO: add support for returning NamedTuple from the mypy plugin
 def astuple(
-    inst: Any,
+    inst: AttrsInstance,
     recurse: bool = ...,
     filter: Optional[_FilterType[Any]] = ...,
     tuple_factory: Type[Sequence[Any]] = ...,
     retain_collection_types: bool = ...,
 ) -> Tuple[Any, ...]: ...
-def has(cls: type) -> bool: ...
+def has(cls: type) -> TypeGuard[Type[AttrsInstance]]: ...
 def assoc(inst: _T, **changes: Any) -> _T: ...
 def evolve(inst: _T, **changes: Any) -> _T: ...
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.py
old mode 100755
new mode 100644
index 6cffa4d..d9cbe22
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.py
@@ -1,10 +1,9 @@
 # SPDX-License-Identifier: MIT
 
-from __future__ import absolute_import, division, print_function
 
 import functools
+import types
 
-from ._compat import new_class
 from ._make import _make_ne
 
@@ -21,22 +20,22 @@ def cmp_using(
     class_name="Comparable",
 ):
     """
-    Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
-    ``cmp`` arguments to customize field comparison.
-
-    The resulting class will have a full set of ordering methods if
-    at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
-
-    :param Optional[callable] eq: `callable` used to evaluate equality
-        of two objects.
-    :param Optional[callable] lt: `callable` used to evaluate whether
-        one object is less than another object.
-    :param Optional[callable] le: `callable` used to evaluate whether
-        one object is less than or equal to another object.
-    :param Optional[callable] gt: `callable` used to evaluate whether
-        one object is greater than another object.
-    :param Optional[callable] ge: `callable` used to evaluate whether
-        one object is greater than or equal to another object.
+    Create a class that can be passed into `attrs.field`'s ``eq``, ``order``,
+    and ``cmp`` arguments to customize field comparison.
+
+    The resulting class will have a full set of ordering methods if at least
+    one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
+
+    :param Optional[callable] eq: `callable` used to evaluate equality of two
+        objects.
+    :param Optional[callable] lt: `callable` used to evaluate whether one
+        object is less than another object.
+    :param Optional[callable] le: `callable` used to evaluate whether one
+        object is less than or equal to another object.
+    :param Optional[callable] gt: `callable` used to evaluate whether one
+        object is greater than another object.
+    :param Optional[callable] ge: `callable` used to evaluate whether one
+        object is greater than or equal to another object.
 
     :param bool require_same_type: When `True`, equality and ordering methods
         will return `NotImplemented` if objects are not of the same type.
@@ -80,7 +79,9 @@ def cmp_using(
         num_order_functions += 1
         body["__ge__"] = _make_operator("ge", ge)
 
-    type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body))
+    type_ = types.new_class(
+        class_name, (object,), {}, lambda ns: ns.update(body)
+    )
 
     # Add same type requirement.
     if require_same_type:
@@ -129,9 +130,9 @@ def method(self, other):
 
         return result
 
-    method.__name__ = "__%s__" % (name,)
-    method.__doc__ = "Return a %s b. Computed by attrs." % (
-        _operation_names[name],
+    method.__name__ = f"__{name}__"
+    method.__doc__ = (
+        f"Return a {_operation_names[name]} b. Computed by attrs."
     )
 
     return method
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.pyi
index e71aaff..f3dcdc1 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.pyi
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_cmp.pyi
@@ -1,13 +1,13 @@
-from typing import Type
+from typing import Any, Callable, Optional, Type
 
-from . import _CompareWithType
+_CompareWithType = Callable[[Any, Any], bool]
 
 def cmp_using(
-    eq: Optional[_CompareWithType],
-    lt: Optional[_CompareWithType],
-    le: Optional[_CompareWithType],
-    gt: Optional[_CompareWithType],
-    ge: Optional[_CompareWithType],
-    require_same_type: bool,
-    class_name: str,
+    eq: Optional[_CompareWithType] = ...,
+    lt: Optional[_CompareWithType] = ...,
+    le: Optional[_CompareWithType] = ...,
+    gt: Optional[_CompareWithType] = ...,
+    ge: Optional[_CompareWithType] = ...,
+    require_same_type: bool = ...,
+    class_name: str = ...,
 ) -> Type: ...
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_compat.py old mode 100755 new mode 100644 index dc0cb02..c3bf5e3 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_compat.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_compat.py @@ -1,145 +1,71 @@ # SPDX-License-Identifier: MIT -from __future__ import absolute_import, division, print_function +import inspect import platform import sys import threading import types import warnings +from collections.abc import Mapping, Sequence # noqa +from typing import _GenericAlias + -PY2 = sys.version_info[0] == 2 PYPY = platform.python_implementation() == "PyPy" -PY36 = sys.version_info[:2] >= (3, 6) -HAS_F_STRINGS = PY36 +PY_3_9_PLUS = sys.version_info[:2] >= (3, 9) PY310 = sys.version_info[:2] >= (3, 10) +PY_3_12_PLUS = sys.version_info[:2] >= (3, 12) -if PYPY or PY36: - ordered_dict = dict -else: - from collections import OrderedDict - - ordered_dict = OrderedDict - +def just_warn(*args, **kw): + warnings.warn( + "Running interpreter doesn't sufficiently support code object " + "introspection. Some features like bare super() or accessing " + "__class__ will not work with slotted classes.", + RuntimeWarning, + stacklevel=2, + ) -if PY2: - from collections import Mapping, Sequence - from UserDict import IterableUserDict - - # We 'bundle' isclass instead of using inspect as importing inspect is - # fairly expensive (order of 10-15 ms for a modern machine in 2016) - def isclass(klass): - return isinstance(klass, (type, types.ClassType)) - - def new_class(name, bases, kwds, exec_body): - """ - A minimal stub of types.new_class that we need for make_class. - """ - ns = {} - exec_body(ns) - - return type(name, bases, ns) +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ - # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. - TYPE = "type" + __slots__ = ["sig"] - def iteritems(d): - return d.iteritems() + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None - # Python 2 is bereft of a read-only dict proxy, so we make one! - class ReadOnlyDict(IterableUserDict): + def get_first_param_type(self): """ - Best-effort read-only dict wrapper. + Return the type annotation of the first argument if it's not empty. """ + if not self.sig: + return None - def __setitem__(self, key, val): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item assignment" - ) - - def update(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'update'" - ) - - def __delitem__(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item deletion" - ) - - def clear(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'clear'" - ) - - def pop(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'pop'" - ) - - def popitem(self): - # We gently pretend we're a Python 3 mappingproxy. 
- raise AttributeError( - "'mappingproxy' object has no attribute 'popitem'" - ) - - def setdefault(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'setdefault'" - ) - - def __repr__(self): - # Override to be identical to the Python 3 version. - return "mappingproxy(" + repr(self.data) + ")" - - def metadata_proxy(d): - res = ReadOnlyDict() - res.data.update(d) # We blocked update, so we have to do it like this. - return res - - def just_warn(*args, **kw): # pragma: no cover - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation -else: # Python 3 and later. - from collections.abc import Mapping, Sequence # noqa + return None - def just_warn(*args, **kw): + def get_return_type(self): """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. + Return the return type if it's not empty. """ - warnings.warn( - "Running interpreter doesn't sufficiently support code object " - "introspection. Some features like bare super() or accessing " - "__class__ will not work with slotted classes.", - RuntimeWarning, - stacklevel=2, - ) + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation - def isclass(klass): - return isinstance(klass, type) - - TYPE = "class" - - def iteritems(d): - return d.items() - - new_class = types.new_class - - def metadata_proxy(d): - return types.MappingProxyType(dict(d)) + return None def make_set_closure_cell(): @@ -157,40 +83,34 @@ def set_closure_cell(cell, value): # Otherwise gotta do it the hard way. - # Create a function that will set its first cellvar to `value`. - def set_first_cellvar_to(value): - x = value - return + try: + if sys.version_info >= (3, 8): - # This function will be eliminated as dead code, but - # not before its reference to `x` forces `x` to be - # represented as a closure cell rather than a local. - def force_x_to_be_a_cell(): # pragma: no cover - return x + def set_closure_cell(cell, value): + cell.cell_contents = value - try: - # Extract the code object and make sure our assumptions about - # the closure behavior are correct. - if PY2: - co = set_first_cellvar_to.func_code else: + # Create a function that will set its first cellvar to `value`. + def set_first_cellvar_to(value): + x = value + return + + # This function will be eliminated as dead code, but + # not before its reference to `x` forces `x` to be + # represented as a closure cell rather than a local. + def force_x_to_be_a_cell(): # pragma: no cover + return x + + # Extract the code object and make sure our assumptions about + # the closure behavior are correct. co = set_first_cellvar_to.__code__ - if co.co_cellvars != ("x",) or co.co_freevars != (): - raise AssertionError # pragma: no cover + if co.co_cellvars != ("x",) or co.co_freevars != (): + raise AssertionError # pragma: no cover - # Convert this code object to a code object that sets the - # function's first _freevar_ (not cellvar) to the argument. - if sys.version_info >= (3, 8): - # CPython 3.8+ has an incompatible CodeType signature - # (added a posonlyargcount argument) but also added - # CodeType.replace() to do this without counting parameters. 
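On interpreters where `cell.cell_contents` is writable (the new branch above gates this at 3.8), the code-object surgery that follows is only a fallback. A tiny demo of the fast path, assuming CPython 3.8+:

def make_func_with_cell():
    x = None

    def func():
        return x  # closes over x, so x lives in a closure cell

    return func

f = make_func_with_cell()
cell = f.__closure__[0]
cell.cell_contents = 100  # the same assignment set_closure_cell performs
print(f())  # 100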
- set_first_freevar_code = co.replace( - co_cellvars=co.co_freevars, co_freevars=co.co_cellvars - ) - else: + # Convert this code object to a code object that sets the + # function's first _freevar_ (not cellvar) to the argument. args = [co.co_argcount] - if not PY2: - args.append(co.co_kwonlyargcount) + args.append(co.co_kwonlyargcount) args.extend( [ co.co_nlocals, @@ -211,15 +131,15 @@ def force_x_to_be_a_cell(): # pragma: no cover ) set_first_freevar_code = types.CodeType(*args) - def set_closure_cell(cell, value): - # Create a function using the set_first_freevar_code, - # whose first closure cell is `cell`. Calling it will - # change the value of that cell. - setter = types.FunctionType( - set_first_freevar_code, {}, "setter", (), (cell,) - ) - # And call it to set the cell. - setter(value) + def set_closure_cell(cell, value): + # Create a function using the set_first_freevar_code, + # whose first closure cell is `cell`. Calling it will + # change the value of that cell. + setter = types.FunctionType( + set_first_freevar_code, {}, "setter", (), (cell,) + ) + # And call it to set the cell. + setter(value) # Make sure it works on this interpreter: def make_func_with_cell(): @@ -230,10 +150,7 @@ def func(): return func - if PY2: - cell = make_func_with_cell().func_closure[0] - else: - cell = make_func_with_cell().__closure__[0] + cell = make_func_with_cell().__closure__[0] set_closure_cell(cell, 100) if cell.cell_contents != 100: raise AssertionError # pragma: no cover @@ -259,3 +176,10 @@ def func(): # don't have a direct reference to the thread-local in their globals dict. # If they have such a reference, it breaks cloudpickle. repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_config.py old mode 100755 new mode 100644 index fc9be29..96d4200 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_config.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_config.py @@ -1,7 +1,5 @@ # SPDX-License-Identifier: MIT -from __future__ import absolute_import, division, print_function - __all__ = ["set_run_validators", "get_run_validators"] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_funcs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_funcs.py old mode 100755 new mode 100644 index 4c90085..7f5d961 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_funcs.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_funcs.py @@ -1,10 +1,9 @@ # SPDX-License-Identifier: MIT -from __future__ import absolute_import, division, print_function import copy -from ._compat import iteritems +from ._compat import PY_3_9_PLUS, get_generic_base from ._make import NOTHING, _obj_setattr, fields from .exceptions import AttrsAttributeNotFoundError @@ -18,13 +17,13 @@ def asdict( value_serializer=None, ): """ - Return the ``attrs`` attribute values of *inst* as a dict. + Return the *attrs* attribute values of *inst* as a dict. - Optionally recurse into other ``attrs``-decorated classes. + Optionally recurse into other *attrs*-decorated classes. - :param inst: Instance of an ``attrs``-decorated class. 
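A short sketch of the recursive `asdict` behavior being documented here, assuming attrs is installed; `Inner` and `Outer` are illustrative:

import attrs

@attrs.define
class Inner:
    a: int

@attrs.define
class Outer:
    inner: Inner
    tags: list

print(attrs.asdict(Outer(Inner(1), ["x"])))
# {'inner': {'a': 1}, 'tags': ['x']}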
+ :param inst: Instance of an *attrs*-decorated class. :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. + *attrs*-decorated. :param callable filter: A callable whose return code determines whether an attribute or element is included (``True``) or dropped (``False``). Is called with the `attrs.Attribute` as the first argument and the @@ -42,7 +41,7 @@ def asdict( :rtype: return type of *dict_factory* - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* class. .. versionadded:: 16.0.0 *dict_factory* @@ -107,7 +106,7 @@ def asdict( value_serializer=value_serializer, ), ) - for kk, vv in iteritems(v) + for kk, vv in v.items() ) else: rv[a.name] = v @@ -179,7 +178,7 @@ def _asdict_anything( value_serializer=value_serializer, ), ) - for kk, vv in iteritems(val) + for kk, vv in val.items() ) else: rv = val @@ -197,13 +196,13 @@ def astuple( retain_collection_types=False, ): """ - Return the ``attrs`` attribute values of *inst* as a tuple. + Return the *attrs* attribute values of *inst* as a tuple. - Optionally recurse into other ``attrs``-decorated classes. + Optionally recurse into other *attrs*-decorated classes. - :param inst: Instance of an ``attrs``-decorated class. + :param inst: Instance of an *attrs*-decorated class. :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. + *attrs*-decorated. :param callable filter: A callable whose return code determines whether an attribute or element is included (``True``) or dropped (``False``). Is called with the `attrs.Attribute` as the first argument and the @@ -217,7 +216,7 @@ def astuple( :rtype: return type of *tuple_factory* - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* class. .. versionadded:: 16.2.0 @@ -278,7 +277,7 @@ def astuple( if has(vv.__class__) else vv, ) - for kk, vv in iteritems(v) + for kk, vv in v.items() ) ) else: @@ -291,28 +290,48 @@ def astuple( def has(cls): """ - Check whether *cls* is a class with ``attrs`` attributes. + Check whether *cls* is a class with *attrs* attributes. :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. :rtype: bool """ - return getattr(cls, "__attrs_attrs__", None) is not None + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is not None: + return True + + # No attrs, maybe it's a specialized generic (A[str])? + generic_base = get_generic_base(cls) + if generic_base is not None: + generic_attrs = getattr(generic_base, "__attrs_attrs__", None) + if generic_attrs is not None: + # Stick it on here for speed next time. + cls.__attrs_attrs__ = generic_attrs + return generic_attrs is not None + return False def assoc(inst, **changes): """ Copy *inst* and apply *changes*. - :param inst: Instance of a class with ``attrs`` attributes. + This is different from `evolve` that applies the changes to the arguments + that create the new instance. + + `evolve`'s behavior is preferable, but there are `edge cases`_ where it + doesn't work. Therefore `assoc` is deprecated, but will not be removed. + + .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251 + + :param inst: Instance of a class with *attrs* attributes. :param changes: Keyword changes in the new copy. :return: A copy of inst with *changes* incorporated. - :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't - be found on *cls*. 
- :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + :raise attrs.exceptions.AttrsAttributeNotFoundError: If *attr_name* + couldn't be found on *cls*. + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* class. .. deprecated:: 17.1.0 @@ -320,57 +339,83 @@ def assoc(inst, **changes): This function will not be removed du to the slightly different approach compared to `attrs.evolve`. """ - import warnings - - warnings.warn( - "assoc is deprecated and will be removed after 2018/01.", - DeprecationWarning, - stacklevel=2, - ) new = copy.copy(inst) attrs = fields(inst.__class__) - for k, v in iteritems(changes): + for k, v in changes.items(): a = getattr(attrs, k, NOTHING) if a is NOTHING: raise AttrsAttributeNotFoundError( - "{k} is not an attrs attribute on {cl}.".format( - k=k, cl=new.__class__ - ) + f"{k} is not an attrs attribute on {new.__class__}." ) _obj_setattr(new, k, v) return new -def evolve(inst, **changes): +def evolve(*args, **changes): """ - Create a new instance, based on *inst* with *changes* applied. + Create a new instance, based on the first positional argument with + *changes* applied. - :param inst: Instance of a class with ``attrs`` attributes. + :param inst: Instance of a class with *attrs* attributes. :param changes: Keyword changes in the new copy. :return: A copy of inst with *changes* incorporated. :raise TypeError: If *attr_name* couldn't be found in the class ``__init__``. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* class. - .. versionadded:: 17.1.0 + .. versionadded:: 17.1.0 + .. deprecated:: 23.1.0 + It is now deprecated to pass the instance using the keyword argument + *inst*. It will raise a warning until at least April 2024, after which + it will become an error. Always pass the instance as a positional + argument. """ + # Try to get instance by positional argument first. + # Use changes otherwise and warn it'll break. + if args: + try: + (inst,) = args + except ValueError: + raise TypeError( + f"evolve() takes 1 positional argument, but {len(args)} " + "were given" + ) from None + else: + try: + inst = changes.pop("inst") + except KeyError: + raise TypeError( + "evolve() missing 1 required positional argument: 'inst'" + ) from None + + import warnings + + warnings.warn( + "Passing the instance per keyword argument is deprecated and " + "will stop working in, or after, April 2024.", + DeprecationWarning, + stacklevel=2, + ) + cls = inst.__class__ attrs = fields(cls) for a in attrs: if not a.init: continue attr_name = a.name # To deal with private attributes. - init_name = attr_name if attr_name[0] != "_" else attr_name[1:] + init_name = a.alias if init_name not in changes: changes[init_name] = getattr(inst, attr_name) return cls(**changes) -def resolve_types(cls, globalns=None, localns=None, attribs=None): +def resolve_types( + cls, globalns=None, localns=None, attribs=None, include_extras=True +): """ Resolve any strings and forward annotations in type annotations. @@ -389,10 +434,14 @@ def resolve_types(cls, globalns=None, localns=None, attribs=None): :param Optional[dict] localns: Dictionary containing local variables. :param Optional[list] attribs: List of attribs for the given class. This is necessary when calling from inside a ``field_transformer`` - since *cls* is not an ``attrs`` class yet. + since *cls* is not an *attrs* class yet. + :param bool include_extras: Resolve more accurately, if possible. 
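A sketch of what *include_extras* (whose description continues below) preserves, assuming Python 3.9+ and attrs >= 23.1; `C` is an illustrative class:

from typing import Annotated

import attr

@attr.s(auto_attribs=True)
class C:
    x: "Annotated[int, 'metadata']"

attr.resolve_types(C, include_extras=True)
print(attr.fields(C).x.type)  # typing.Annotated[int, 'metadata'], not bare int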
+ Pass ``include_extras`` to ``typing.get_hints``, if supported by the + typing module. On supported Python versions (3.9+), this resolves the + types more accurately. :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* class and you didn't pass any attribs. :raise NameError: If types cannot be resolved because of missing variables. @@ -402,6 +451,7 @@ class and you didn't pass any attribs. .. versionadded:: 20.1.0 .. versionadded:: 21.1.0 *attribs* + .. versionadded:: 23.1.0 *include_extras* """ # Since calling get_type_hints is expensive we cache whether we've @@ -409,7 +459,12 @@ class and you didn't pass any attribs. if getattr(cls, "__attrs_types_resolved__", None) != cls: import typing - hints = typing.get_type_hints(cls, globalns=globalns, localns=localns) + kwargs = {"globalns": globalns, "localns": localns} + + if PY_3_9_PLUS: + kwargs["include_extras"] = include_extras + + hints = typing.get_type_hints(cls, **kwargs) for field in fields(cls) if attribs is None else attribs: if field.name in hints: # Since fields have been frozen we must work around it. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_make.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_make.py old mode 100755 new mode 100644 index d46f8a3..d72f738 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_make.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_make.py @@ -1,12 +1,11 @@ # SPDX-License-Identifier: MIT -from __future__ import absolute_import, division, print_function - import copy -import inspect +import enum import linecache import sys -import warnings +import types +import typing from operator import itemgetter @@ -14,37 +13,23 @@ # having the thread-local in the globals here. from . import _compat, _config, setters from ._compat import ( - HAS_F_STRINGS, - PY2, PY310, - PYPY, - isclass, - iteritems, - metadata_proxy, - new_class, - ordered_dict, + _AnnotationExtractor, + get_generic_base, set_closure_cell, ) from .exceptions import ( DefaultAlreadySetError, FrozenInstanceError, NotAnAttrsClassError, - PythonTooOldError, UnannotatedAttributeError, ) -if not PY2: - import typing - - # This is used at least twice, so cache it here. _obj_setattr = object.__setattr__ _init_converter_pat = "__attr_converter_%s" -_init_factory_pat = "__attr_factory_{}" -_tuple_property_pat = ( - " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" -) +_init_factory_pat = "__attr_factory_%s" _classvar_prefixes = ( "typing.ClassVar", "t.ClassVar", @@ -56,7 +41,7 @@ # (when slots=True) _hash_cache_field = "_attrs_cached_hash" -_empty_metadata_singleton = metadata_proxy({}) +_empty_metadata_singleton = types.MappingProxyType({}) # Unique object for unequivocal getattr() defaults. _sentinel = object() @@ -64,21 +49,18 @@ _ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) -class _Nothing(object): +class _Nothing(enum.Enum): """ - Sentinel class to indicate the lack of a value when ``None`` is ambiguous. + Sentinel to indicate the lack of a value when ``None`` is ambiguous. - ``_Nothing`` is a singleton. There is only ever one of it. + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. 
versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. """ - _singleton = None - - def __new__(cls): - if _Nothing._singleton is None: - _Nothing._singleton = super(_Nothing, cls).__new__(cls) - return _Nothing._singleton + NOTHING = enum.auto() def __repr__(self): return "NOTHING" @@ -86,11 +68,8 @@ def __repr__(self): def __bool__(self): return False - def __len__(self): - return 0 # __bool__ for Python 2 - -NOTHING = _Nothing() +NOTHING = _Nothing.NOTHING """ Sentinel to indicate the lack of a value when ``None`` is ambiguous. """ @@ -108,17 +87,8 @@ class _CacheHashWrapper(int): See GH #613 for more details. """ - if PY2: - # For some reason `type(None)` isn't callable in Python 2, but we don't - # actually need a constructor for None objects, we just need any - # available function that returns None. - def __reduce__(self, _none_constructor=getattr, _args=(0, "", None)): - return _none_constructor, _args - - else: - - def __reduce__(self, _none_constructor=type(None), _args=()): - return _none_constructor, _args + def __reduce__(self, _none_constructor=type(None), _args=()): + return _none_constructor, _args def attrib( @@ -136,6 +106,7 @@ def attrib( eq=None, order=None, on_setattr=None, + alias=None, ): """ Create a new attribute on a class. @@ -143,9 +114,12 @@ def attrib( .. warning:: Does *not* do anything unless the class is also decorated with - `attr.s`! + `attr.s` / `attrs.define` / et cetera! - :param default: A value that is used if an ``attrs``-generated ``__init__`` + Please consider using `attrs.field` in new code (``attr.ib`` will *never* + go away, though). + + :param default: A value that is used if an *attrs*-generated ``__init__`` is used and no value is passed while instantiating or the attribute is excluded using ``init=False``. @@ -164,7 +138,7 @@ def attrib( :param callable factory: Syntactic sugar for ``default=attr.Factory(factory)``. - :param validator: `callable` that is called by ``attrs``-generated + :param validator: `callable` that is called by *attrs*-generated ``__init__`` methods after the instance has been initialized. They receive the initialized instance, the :func:`~attrs.Attribute`, and the passed value. @@ -176,7 +150,7 @@ def attrib( all pass. Validators can be globally disabled and re-enabled using - `get_run_validators`. + `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. The validator can also be set using decorator notation as shown below. @@ -218,31 +192,33 @@ def attrib( value. In that case this attributed is unconditionally initialized with the specified default value or factory. :param callable converter: `callable` that is called by - ``attrs``-generated ``__init__`` methods to convert attribute's value + *attrs*-generated ``__init__`` methods to convert attribute's value to the desired format. It is given the passed-in value, and the returned value will be used as the new value of the attribute. The value is converted before being passed to the validator, if any. :param metadata: An arbitrary mapping, to be used by third-party - components. See `extending_metadata`. - :param type: The type of the attribute. In Python 3.6 or greater, the - preferred method to specify the type is using a variable annotation - (see `PEP 526 `_). + components. See `extending-metadata`. + + :param type: The type of the attribute. Nowadays, the preferred method to + specify the type is using a variable annotation (see :pep:`526`). This argument is provided for backward compatibility. 
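The `_Nothing` rework earlier in this hunk turns the sentinel into an `enum.Enum` member, mostly so type checkers can spell it as `typing.Literal[NOTHING]`. Its observable behavior is otherwise unchanged; a sketch, assuming attrs >= 22.2:

from attr import NOTHING

print(bool(NOTHING))  # False (falsy since 21.1.0)
print(repr(NOTHING))  # NOTHING

# Typical use: telling "no default was set" apart from a default of None,
# e.g. attr.fields(C).x.default is NOTHING means field x has no default.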
Regardless of the approach used, the type will be stored on ``Attribute.type``. - Please note that ``attrs`` doesn't do anything with this metadata by + Please note that *attrs* doesn't do anything with this metadata by itself. You can use it as part of your own code or for `static type checking `. - :param kw_only: Make this attribute keyword-only (Python 3+) - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). + :param kw_only: Make this attribute keyword-only in the generated + ``__init__`` (if ``init`` is ``False``, this parameter is ignored). :param on_setattr: Allows to overwrite the *on_setattr* setting from `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used. Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this attribute -- regardless of the setting in `attr.s`. :type on_setattr: `callable`, or a list of callables, or `None`, or `attrs.setters.NO_OP` + :param Optional[str] alias: Override this attribute's parameter name in the + generated ``__init__`` method. If left `None`, default to ``name`` + stripped of leading underscores. See `private-attributes`. .. versionadded:: 15.2.0 *convert* .. versionadded:: 16.3.0 *metadata* @@ -265,6 +241,7 @@ def attrib( .. versionchanged:: 21.1.0 *eq*, *order*, and *cmp* also accept a custom callable .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 22.2.0 *alias* """ eq, eq_key, order, order_key = _determine_attrib_eq_order( cmp, eq, order, True @@ -314,6 +291,7 @@ def attrib( order=order, order_key=order_key, on_setattr=on_setattr, + alias=alias, ) @@ -325,13 +303,11 @@ def _compile_and_eval(script, globs, locs=None, filename=""): eval(bytecode, globs, locs) -def _make_method(name, script, filename, globs=None): +def _make_method(name, script, filename, globs): """ Create the method with the script given and return the method object. """ locs = {} - if globs is None: - globs = {} # In order of debuggers like PDB being able to step through the code, # we add a fake linecache entry. @@ -348,7 +324,7 @@ def _make_method(name, script, filename, globs=None): if old_val == linecache_tuple: break else: - filename = "{}-{}>".format(base_filename[:-1], count) + filename = f"{base_filename[:-1]}-{count}>" count += 1 _compile_and_eval(script, globs, locs, filename) @@ -366,15 +342,15 @@ class MyClassAttributes(tuple): __slots__ = () x = property(itemgetter(0)) """ - attr_class_name = "{}Attributes".format(cls_name) + attr_class_name = f"{cls_name}Attributes" attr_class_template = [ - "class {}(tuple):".format(attr_class_name), + f"class {attr_class_name}(tuple):", " __slots__ = ()", ] if attr_names: for i, attr_name in enumerate(attr_names): attr_class_template.append( - _tuple_property_pat.format(index=i, attr_name=attr_name) + f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))" ) else: attr_class_template.append(" pass") @@ -418,8 +394,6 @@ def _is_class_var(annot): def _has_own_attribute(cls, attrib_name): """ Check whether *cls* defines *attrib_name* (and doesn't just inherit it). - - Requires Python 3. """ attr = getattr(cls, attrib_name, _sentinel) if attr is _sentinel: @@ -443,13 +417,6 @@ def _get_annotations(cls): return {} -def _counter_getter(e): - """ - Key function for sorting to avoid re-creating a lambda for every class. - """ - return e[1].counter - - def _collect_base_attrs(cls, taken_attr_names): """ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. 
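The new *alias* argument documented in this hunk overrides the generated `__init__` parameter name, which otherwise defaults to the attribute name with leading underscores stripped. A minimal sketch, assuming attrs >= 22.2; `C` and `token` are illustrative:

import attr

@attr.s
class C:
    _secret = attr.ib(alias="token")  # __init__ takes 'token', not 'secret'

print(C(token="hunter2")._secret)  # hunter2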
@@ -526,10 +493,7 @@ def _transform_attrs( anns = _get_annotations(cls) if these is not None: - ca_list = [(name, ca) for name, ca in iteritems(these)] - - if not isinstance(these, ordered_dict): - ca_list.sort(key=_counter_getter) + ca_list = [(name, ca) for name, ca in these.items()] elif auto_attribs is True: ca_names = { name @@ -601,7 +565,7 @@ def _transform_attrs( if had_default is True and a.default is NOTHING: raise ValueError( "No mandatory attributes allowed after an attribute with a " - "default value or factory. Attribute in question: %r" % (a,) + f"default value or factory. Attribute in question: {a!r}" ) if had_default is False and a.default is not NOTHING: @@ -610,6 +574,14 @@ def _transform_attrs( if field_transformer is not None: attrs = field_transformer(cls, attrs) + # Resolve default field alias after executing field_transformer. + # This allows field_transformer to differentiate between explicit vs + # default aliases and supply their own defaults. + attrs = [ + a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a + for a in attrs + ] + # Create AttrsClass *after* applying the field_transformer since it may # add or remove attributes! attr_names = [a.name for a in attrs] @@ -618,28 +590,19 @@ def _transform_attrs( return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) -if PYPY: - - def _frozen_setattrs(self, name, value): - """ - Attached to frozen classes as __setattr__. - """ - if isinstance(self, BaseException) and name in ( - "__cause__", - "__context__", - ): - BaseException.__setattr__(self, name, value) - return - - raise FrozenInstanceError() - -else: +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + ): + BaseException.__setattr__(self, name, value) + return - def _frozen_setattrs(self, name, value): - """ - Attached to frozen classes as __setattr__. - """ - raise FrozenInstanceError() + raise FrozenInstanceError() def _frozen_delattrs(self, name): @@ -649,7 +612,7 @@ def _frozen_delattrs(self, name): raise FrozenInstanceError() -class _ClassBuilder(object): +class _ClassBuilder: """ Iteratively build *one* class. """ @@ -703,7 +666,7 @@ def __init__( self._cls = cls self._cls_dict = dict(cls.__dict__) if slots else {} self._attrs = attrs - self._base_names = set(a.name for a in base_attrs) + self._base_names = {a.name for a in base_attrs} self._base_attr_map = base_map self._attr_names = tuple(a.name for a in attrs) self._slots = slots @@ -760,17 +723,35 @@ def __init__( ) = self._make_getstate_setstate() def __repr__(self): - return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) + return f"<_ClassBuilder(cls={self._cls.__name__})>" - def build_class(self): - """ - Finalize class based on the accumulated configuration. + if PY310: + import abc + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + + return self.abc.update_abstractmethods( + self._patch_original_class() + ) + + else: + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() - Builder cannot be used after calling this method. 
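Stepping back to the consolidated `_frozen_setattrs` above: `__traceback__` now joins `__cause__` and `__context__` in the carve-out, so the exception machinery can keep mutating those dunders on otherwise frozen instances. A sketch, assuming attrs >= 22.2; `MyError` is illustrative:

import attrs

@attrs.frozen
class MyError(Exception):
    code: int

try:
    raise MyError(404)  # raising/chaining may set the whitelisted dunders
except MyError as e:
    print(e.code)  # 404; e.code = 1 would raise FrozenInstanceError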
- """ - if self._slots is True: - return self._create_slots_class() - else: return self._patch_original_class() def _patch_original_class(self): @@ -807,7 +788,7 @@ def _patch_original_class(self): cls.__attrs_own_setattr__ = False if not self._has_custom_setattr: - cls.__setattr__ = object.__setattr__ + cls.__setattr__ = _obj_setattr return cls @@ -817,7 +798,7 @@ def _create_slots_class(self): """ cd = { k: v - for k, v in iteritems(self._cls_dict) + for k, v in self._cls_dict.items() if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") } @@ -835,7 +816,7 @@ def _create_slots_class(self): if not self._has_custom_setattr: for base_cls in self._cls.__bases__: if base_cls.__dict__.get("__attrs_own_setattr__", False): - cd["__setattr__"] = object.__setattr__ + cd["__setattr__"] = _obj_setattr break # Traverse the MRO to collect existing slots @@ -868,11 +849,11 @@ def _create_slots_class(self): slot_names = [name for name in names if name not in base_names] # There are slots for attributes from current class # that are defined in parent classes. - # As their descriptors may be overriden by a child class, + # As their descriptors may be overridden by a child class, # we collect them here and update the class dict reused_slots = { slot: slot_descriptor - for slot, slot_descriptor in iteritems(existing_slots) + for slot, slot_descriptor in existing_slots.items() if slot in slot_names } slot_names = [name for name in slot_names if name not in reused_slots] @@ -881,16 +862,14 @@ def _create_slots_class(self): slot_names.append(_hash_cache_field) cd["__slots__"] = tuple(slot_names) - qualname = getattr(self._cls, "__qualname__", None) - if qualname is not None: - cd["__qualname__"] = qualname + cd["__qualname__"] = self._cls.__qualname__ # Create new class based on old class and our methods. cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) # The following is a fix for - # . On Python 3, - # if a method mentions `__class__` or uses the no-arg super(), the + # . + # If a method mentions `__class__` or uses the no-arg super(), the # compiler will bake a reference to the class in the method itself # as `method.__closure__`. Since we replace the class with a # clone, we rewrite these references so it keeps working. @@ -951,7 +930,7 @@ def slots_getstate(self): """ Automatically created by attrs. """ - return tuple(getattr(self, name) for name in state_attr_names) + return {name: getattr(self, name) for name in state_attr_names} hash_caching_enabled = self._cache_hash @@ -959,9 +938,16 @@ def slots_setstate(self, state): """ Automatically created by attrs. """ - __bound_setattr = _obj_setattr.__get__(self, Attribute) - for name, value in zip(state_attr_names, state): - __bound_setattr(name, value) + __bound_setattr = _obj_setattr.__get__(self) + if isinstance(state, tuple): + # Backward compatibility with attrs instances pickled with + # attrs versions before v22.2.0 which stored tuples. + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + else: + for name in state_attr_names: + if name in state: + __bound_setattr(name, state[name]) # The hash code cache is not included when the object is # serialized, but it still needs to be initialized to None to @@ -1106,8 +1092,9 @@ def _add_method_dunders(self, method): pass try: - method.__doc__ = "Method generated by attrs for class %s." % ( - self._cls.__qualname__, + method.__doc__ = ( + "Method generated by attrs for class " + f"{self._cls.__qualname__}." 
) except AttributeError: pass @@ -1115,12 +1102,6 @@ def _add_method_dunders(self, method): return method -_CMP_DEPRECATION = ( - "The usage of `cmp` is deprecated and will be removed on or after " - "2021-06-01. Please use `eq` and `order` instead." -) - - def _determine_attrs_eq_order(cmp, eq, order, default_eq): """ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective @@ -1199,8 +1180,6 @@ def _determine_whether_to_implement( whose presence signal that the user has implemented it themselves. Return *default* if no reason for either for or against is found. - - auto_detect must be False on Python 2. """ if flag is True or flag is False: return flag @@ -1240,24 +1219,24 @@ def attrs( on_setattr=None, field_transformer=None, match_args=True, + unsafe_hash=None, ): r""" - A class decorator that adds `dunder - `_\ -methods according to the + A class decorator that adds :term:`dunder methods` according to the specified attributes using `attr.ib` or the *these* argument. + Please consider using `attrs.define` / `attrs.frozen` in new code + (``attr.s`` will *never* go away, though). + :param these: A dictionary of name to `attr.ib` mappings. This is useful to avoid the definition of your attributes within the class body because you can't (e.g. if you want to add ``__repr__`` methods to Django models) or don't want to. - If *these* is not ``None``, ``attrs`` will *not* search the class body + If *these* is not ``None``, *attrs* will *not* search the class body for attributes and will *not* remove any attributes from it. - If *these* is an ordered dict (`dict` on Python 3.6+, - `collections.OrderedDict` otherwise), the order is deduced from - the order of the attributes inside *these*. Otherwise the order - of the definition of the attributes is used. + The order is deduced from the order of the attributes inside *these*. :type these: `dict` of `str` to `attr.ib` @@ -1271,14 +1250,14 @@ def attrs( inherited from some base class). So for example by implementing ``__eq__`` on a class yourself, - ``attrs`` will deduce ``eq=False`` and will create *neither* + *attrs* will deduce ``eq=False`` and will create *neither* ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible ``__ne__`` by default, so it *should* be enough to only implement ``__eq__`` in most cases). .. warning:: - If you prevent ``attrs`` from creating the ordering methods for you + If you prevent *attrs* from creating the ordering methods for you (``order=False``, e.g. by implementing ``__le__``), it becomes *your* responsibility to make sure its ordering is sound. The best way is to use the `functools.total_ordering` decorator. @@ -1287,18 +1266,15 @@ def attrs( Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, *cmp*, or *hash* overrides whatever *auto_detect* would determine. - *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises - an `attrs.exceptions.PythonTooOldError`. - :param bool repr: Create a ``__repr__`` method with a human readable - representation of ``attrs`` attributes.. + representation of *attrs* attributes.. :param bool str: Create a ``__str__`` method that is identical to ``__repr__``. This is usually not necessary except for `Exception`\ s. :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` and ``__ne__`` methods that check two instances for equality. 
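A sketch of the *auto_detect* behavior described earlier in this docstring, assuming attrs >= 20.1; `C` is illustrative:

import attr

@attr.s(auto_detect=True)
class C:
    x = attr.ib()

    def __repr__(self):  # an own __repr__, so attrs won't generate one
        return f"<C x={self.x}>"

print(C(1))          # <C x=1>
print(C(1) == C(1))  # True: __eq__ was still generated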
- They compare the instances as if they were tuples of their ``attrs`` + They compare the instances as if they were tuples of their *attrs* attributes if and only if the types of both classes are *identical*! :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` methods that behave like *eq* above and @@ -1306,10 +1282,10 @@ def attrs( *eq*. :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the same value. Must not be mixed with *eq* or *order*. - :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method - is generated according how *eq* and *frozen* are set. + :param Optional[bool] unsafe_hash: If ``None`` (default), the ``__hash__`` + method is generated according how *eq* and *frozen* are set. - 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. + 1. If *both* are True, *attrs* will generate a ``__hash__`` for you. 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to None, marking it unhashable (which it is). 3. If *eq* is False, ``__hash__`` will be left untouched meaning the @@ -1317,7 +1293,7 @@ def attrs( ``object``, this means it will fall back to id-based hashing.). Although not recommended, you can decide for yourself and force - ``attrs`` to create one (e.g. if the class is immutable even though you + *attrs* to create one (e.g. if the class is immutable even though you didn't freeze it programmatically) by passing ``True`` or not. Both of these cases are rather special and should be used carefully. @@ -1325,8 +1301,10 @@ def attrs( `object.__hash__`, and the `GitHub issue that led to the default \ behavior `_ for more details. + :param Optional[bool] hash: Alias for *unsafe_hash*. *unsafe_hash* takes + precedence. :param bool init: Create a ``__init__`` method that initializes the - ``attrs`` attributes. Leading underscores are stripped for the argument + *attrs* attributes. Leading underscores are stripped for the argument name. If a ``__attrs_pre_init__`` method exists on the class, it will be called before the class is initialized. If a ``__attrs_post_init__`` method exists on the class, it will be called after the class is fully @@ -1336,13 +1314,13 @@ def attrs( injected instead. This allows you to define a custom ``__init__`` method that can do pre-init work such as ``super().__init__()``, and then call ``__attrs_init__()`` and ``__attrs_post_init__()``. - :param bool slots: Create a `slotted class ` that's more - memory-efficient. Slotted classes are generally superior to the default - dict classes, but have some gotchas you should know about, so we - encourage you to read the `glossary entry `. + :param bool slots: Create a :term:`slotted class ` that's + more memory-efficient. Slotted classes are generally superior to the + default dict classes, but have some gotchas you should know about, so + we encourage you to read the :term:`glossary entry `. :param bool frozen: Make instances immutable after initialization. If someone attempts to modify a frozen instance, - `attr.exceptions.FrozenInstanceError` is raised. + `attrs.exceptions.FrozenInstanceError` is raised. .. note:: @@ -1364,10 +1342,10 @@ def attrs( :param bool weakref_slot: Make instances weak-referenceable. This has no effect unless ``slots`` is also enabled. - :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated - attributes (Python 3.6 and later only) from the class body. 
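A minimal sketch of the *auto_attribs* collection being described here, assuming attrs is installed:

import attr

@attr.s(auto_attribs=True)
class C:
    x: int
    y: int = 3  # every collected field must carry an annotation

print(C(1))  # C(x=1, y=3)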
+ :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated + attributes from the class body. - In this case, you **must** annotate every field. If ``attrs`` + In this case, you **must** annotate every field. If *attrs* encounters a field that is set to an `attr.ib` but lacks a type annotation, an `attr.exceptions.UnannotatedAttributeError` is raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't @@ -1383,15 +1361,14 @@ def attrs( .. warning:: For features that use the attribute name to create decorators (e.g. - `validators `), you still *must* assign `attr.ib` to - them. Otherwise Python will either not find the name or try to use - the default value to call e.g. ``validator`` on it. + :ref:`validators `), you still *must* assign `attr.ib` + to them. Otherwise Python will either not find the name or try to + use the default value to call e.g. ``validator`` on it. These errors can be quite confusing and probably the most common bug report on our bug tracker. - .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ - :param bool kw_only: Make all attributes keyword-only (Python 3+) + :param bool kw_only: Make all attributes keyword-only in the generated ``__init__`` (if ``init`` is ``False``, this parameter is ignored). :param bool cache_hash: Ensure that the object's hash code is computed @@ -1407,14 +1384,14 @@ def attrs( class: - the values for *eq*, *order*, and *hash* are ignored and the - instances compare and hash by the instance's ids (N.B. ``attrs`` will + instances compare and hash by the instance's ids (N.B. *attrs* will *not* remove existing implementations of ``__hash__`` or the equality methods. It just won't add own ones.), - all attributes that are either passed into ``__init__`` or have a default value are additionally available as a tuple in the ``args`` attribute, - the value of *str* is ignored leaving ``__str__`` to base classes. - :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs`` + :param bool collect_by_mro: Setting this to `True` fixes the way *attrs* collects attributes from base classes. The default behavior is incorrect in certain cases of multiple inheritance. It should be on by default but is kept off for backward-compatibility. @@ -1448,19 +1425,20 @@ def attrs( If a list of callables is passed, they're automatically wrapped in an `attrs.setters.pipe`. + :type on_setattr: `callable`, or a list of callables, or `None`, or + `attrs.setters.NO_OP` :param Optional[callable] field_transformer: A function that is called with the original class object and all - fields right before ``attrs`` finalizes the class. You can use + fields right before *attrs* finalizes the class. You can use this, e.g., to automatically add converters or validators to fields based on their types. See `transform-fields` for more details. :param bool match_args: If `True` (default), set ``__match_args__`` on the class to support - `PEP 634 `_ (Structural - Pattern Matching). It is a tuple of all positional-only ``__init__`` - parameter names on Python 3.10 and later. Ignored on older Python - versions. + :pep:`634` (Structural Pattern Matching). It is a tuple of all + non-keyword-only ``__init__`` parameter names on Python 3.10 and later. + Ignored on older Python versions. .. versionadded:: 16.0.0 *slots* .. versionadded:: 16.1.0 *frozen* @@ -1496,23 +1474,19 @@ def attrs( .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` .. versionchanged:: 21.1.0 *cmp* undeprecated .. versionadded:: 21.3.0 *match_args* + .. 
versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). """ - if auto_detect and PY2: - raise PythonTooOldError( - "auto_detect only works on Python 3 and later." - ) - eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) - hash_ = hash # work around the lack of nonlocal + + # unsafe_hash takes precedence due to PEP 681. + if unsafe_hash is not None: + hash = unsafe_hash if isinstance(on_setattr, (list, tuple)): on_setattr = setters.pipe(*on_setattr) def wrap(cls): - - if getattr(cls, "__class__", None) is None: - raise TypeError("attrs only works with new-style classes.") - is_frozen = frozen or _has_frozen_base_class(cls) is_exc = auto_exc is True and issubclass(cls, BaseException) has_own_setattr = auto_detect and _has_own_attribute( @@ -1563,14 +1537,14 @@ def wrap(cls): builder.add_setattr() + nonlocal hash if ( - hash_ is None + hash is None and auto_detect is True and _has_own_attribute(cls, "__hash__") ): hash = False - else: - hash = hash_ + if hash is not True and hash is not False and hash is not None: # Can't use `hash in` because 1 == True for example. raise TypeError( @@ -1636,39 +1610,22 @@ def wrap(cls): """ -if PY2: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return ( - getattr(cls.__setattr__, "__module__", None) - == _frozen_setattrs.__module__ - and cls.__setattr__.__name__ == _frozen_setattrs.__name__ - ) - -else: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return cls.__setattr__ == _frozen_setattrs +def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ is _frozen_setattrs def _generate_unique_filename(cls, func_name): """ Create a "filename" suitable for a function being generated. 
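As the changelog entry above notes, *unsafe_hash* is a PEP 681-compliant alias that takes precedence over *hash*. A sketch, assuming attrs >= 22.2:

import attr

@attr.s(unsafe_hash=True)  # equivalent to hash=True
class C:
    x = attr.ib()

print(hash(C(1)) == hash(C(1)))  # True: __hash__ computed from field values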
""" - unique_filename = "".format( - func_name, - cls.__module__, - getattr(cls, "__qualname__", cls.__name__), + return ( + f"" ) - return unique_filename def _make_hash(cls, attrs, frozen, cache_hash): @@ -1680,6 +1637,8 @@ def _make_hash(cls, attrs, frozen, cache_hash): unique_filename = _generate_unique_filename(cls, "hash") type_hash = hash(unique_filename) + # If eq is custom generated, we need to include the functions in globs + globs = {} hash_def = "def __hash__(self" hash_func = "hash((" @@ -1687,8 +1646,7 @@ def _make_hash(cls, attrs, frozen, cache_hash): if not cache_hash: hash_def += "):" else: - if not PY2: - hash_def += ", *" + hash_def += ", *" hash_def += ( ", _cache_wrapper=" @@ -1709,32 +1667,39 @@ def append_hash_computation_lines(prefix, indent): method_lines.extend( [ indent + prefix + hash_func, - indent + " %d," % (type_hash,), + indent + f" {type_hash},", ] ) for a in attrs: - method_lines.append(indent + " self.%s," % a.name) + if a.eq_key: + cmp_name = f"_{a.name}_key" + globs[cmp_name] = a.eq_key + method_lines.append( + indent + f" {cmp_name}(self.{a.name})," + ) + else: + method_lines.append(indent + f" self.{a.name},") method_lines.append(indent + " " + closing_braces) if cache_hash: - method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) + method_lines.append(tab + f"if self.{_hash_cache_field} is None:") if frozen: append_hash_computation_lines( - "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 + f"object.__setattr__(self, '{_hash_cache_field}', ", tab * 2 ) method_lines.append(tab * 2 + ")") # close __setattr__ else: append_hash_computation_lines( - "self.%s = " % _hash_cache_field, tab * 2 + f"self.{_hash_cache_field} = ", tab * 2 ) - method_lines.append(tab + "return self.%s" % _hash_cache_field) + method_lines.append(tab + f"return self.{_hash_cache_field}") else: append_hash_computation_lines("return ", tab) script = "\n".join(method_lines) - return _make_method("__hash__", script, unique_filename) + return _make_method("__hash__", script, unique_filename, globs) def _add_hash(cls, attrs): @@ -1785,27 +1750,15 @@ def _make_eq(cls, attrs): others = [" ) == ("] for a in attrs: if a.eq_key: - cmp_name = "_%s_key" % (a.name,) + cmp_name = f"_{a.name}_key" # Add the key function to the global namespace # of the evaluated function. globs[cmp_name] = a.eq_key - lines.append( - " %s(self.%s)," - % ( - cmp_name, - a.name, - ) - ) - others.append( - " %s(other.%s)," - % ( - cmp_name, - a.name, - ) - ) + lines.append(f" {cmp_name}(self.{a.name}),") + others.append(f" {cmp_name}(other.{a.name}),") else: - lines.append(" self.%s," % (a.name,)) - others.append(" other.%s," % (a.name,)) + lines.append(f" self.{a.name},") + others.append(f" other.{a.name},") lines += others + [" )"] else: @@ -1885,134 +1838,61 @@ def _add_eq(cls, attrs=None): return cls -if HAS_F_STRINGS: - - def _make_repr(attrs, ns, cls): - unique_filename = _generate_unique_filename(cls, "repr") - # Figure out which attributes to include, and which function to use to - # format them. The a.repr value can be either bool or a custom - # callable. - attr_names_with_reprs = tuple( - (a.name, (repr if a.repr is True else a.repr), a.init) - for a in attrs - if a.repr is not False - ) - globs = { - name + "_repr": r - for name, r, _ in attr_names_with_reprs - if r != repr - } - globs["_compat"] = _compat - globs["AttributeError"] = AttributeError - globs["NOTHING"] = NOTHING - attribute_fragments = [] - for name, r, i in attr_names_with_reprs: - accessor = ( - "self." 
+ name - if i - else 'getattr(self, "' + name + '", NOTHING)' - ) - fragment = ( - "%s={%s!r}" % (name, accessor) - if r == repr - else "%s={%s_repr(%s)}" % (name, name, accessor) - ) - attribute_fragments.append(fragment) - repr_fragment = ", ".join(attribute_fragments) - - if ns is None: - cls_name_fragment = ( - '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' - ) - else: - cls_name_fragment = ns + ".{self.__class__.__name__}" - - lines = [ - "def __repr__(self):", - " try:", - " already_repring = _compat.repr_context.already_repring", - " except AttributeError:", - " already_repring = {id(self),}", - " _compat.repr_context.already_repring = already_repring", - " else:", - " if id(self) in already_repring:", - " return '...'", - " else:", - " already_repring.add(id(self))", - " try:", - " return f'%s(%s)'" % (cls_name_fragment, repr_fragment), - " finally:", - " already_repring.remove(id(self))", - ] - - return _make_method( - "__repr__", "\n".join(lines), unique_filename, globs=globs +def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." + name if i else 'getattr(self, "' + name + '", NOTHING)' ) - -else: - - def _make_repr(attrs, ns, _): - """ - Make a repr method that includes relevant *attrs*, adding *ns* to the - full name. - """ - - # Figure out which attributes to include, and which function to use to - # format them. The a.repr value can be either bool or a custom - # callable. - attr_names_with_reprs = tuple( - (a.name, repr if a.repr is True else a.repr) - for a in attrs - if a.repr is not False + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) - def __repr__(self): - """ - Automatically created by attrs. - """ - try: - already_repring = _compat.repr_context.already_repring - except AttributeError: - already_repring = set() - _compat.repr_context.already_repring = already_repring - - if id(self) in already_repring: - return "..." - real_cls = self.__class__ - if ns is None: - qualname = getattr(real_cls, "__qualname__", None) - if qualname is not None: # pragma: no cover - # This case only happens on Python 3.5 and 3.6. We exclude - # it from coverage, because we don't want to slow down our - # test suite by running them under coverage too for this - # one line. - class_name = qualname.rsplit(">.", 1)[-1] - else: - class_name = real_cls.__name__ - else: - class_name = ns + "." + real_cls.__name__ + if ns is None: + cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" - # Since 'self' remains on the stack (i.e.: strongly referenced) - # for the duration of this call, it's safe to depend on id(...) - # stability, and not need to track the instance and therefore - # worry about properties like weakref- or hash-ability. 
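The `already_repring` bookkeeping in the unified `_make_repr` above guards against reference cycles. A sketch of the resulting behavior, assuming attrs is installed; `Node` is illustrative:

import attr

@attr.s
class Node:
    child = attr.ib(default=None)

n = Node()
n.child = n     # a self-referential cycle
print(repr(n))  # Node(child=...): the recursion is cut off with '...'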
- already_repring.add(id(self)) - try: - result = [class_name, "("] - first = True - for name, attr_repr in attr_names_with_reprs: - if first: - first = False - else: - result.append(", ") - result.extend( - (name, "=", attr_repr(getattr(self, name, NOTHING))) - ) - return "".join(result) + ")" - finally: - already_repring.remove(id(self)) + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + f" return f'{cls_name_fragment}({repr_fragment})'", + " finally:", + " already_repring.remove(id(self))", + ] - return __repr__ + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) def _add_repr(cls, ns=None, attrs=None): @@ -2028,7 +1908,7 @@ def _add_repr(cls, ns=None, attrs=None): def fields(cls): """ - Return the tuple of ``attrs`` attributes for a class. + Return the tuple of *attrs* attributes for a class. The tuple also allows accessing the fields by their names (see below for examples). @@ -2036,50 +1916,57 @@ def fields(cls): :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* class. :rtype: tuple (with name accessors) of `attrs.Attribute` - .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields - by name. + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + .. versionchanged:: 23.1.0 Add support for generic classes. """ - if not isclass(cls): + generic_base = get_generic_base(cls) + + if generic_base is None and not isinstance(cls, type): raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) + if generic_base is not None: + attrs = getattr(generic_base, "__attrs_attrs__", None) + if attrs is not None: + # Even though this is global state, stick it on here to speed + # it up. We rely on `cls` being cached for this to be + # efficient. + cls.__attrs_attrs__ = attrs + return attrs + raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.") + return attrs def fields_dict(cls): """ - Return an ordered dictionary of ``attrs`` attributes for a class, whose + Return an ordered dictionary of *attrs* attributes for a class, whose keys are the attribute names. :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* class. - :rtype: an ordered dict where keys are attribute names and values are - `attrs.Attribute`\\ s. This will be a `dict` if it's - naturally ordered like on Python 3.6+ or an - :class:`~collections.OrderedDict` otherwise. + :rtype: dict .. 
versionadded:: 18.1.0 """ - if not isclass(cls): + if not isinstance(cls, type): raise TypeError("Passed object must be a class.") attrs = getattr(cls, "__attrs_attrs__", None) if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) - return ordered_dict(((a.name, a) for a in attrs)) + raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.") + return {a.name: a for a in attrs} def validate(inst): @@ -2088,7 +1975,7 @@ def validate(inst): Leaves all exceptions through. - :param inst: Instance of a class with ``attrs`` attributes. + :param inst: Instance of a class with *attrs* attributes. """ if _config._run_validators is False: return @@ -2172,7 +2059,7 @@ def _make_init( if needs_cached_setattr: # Save the lookup overhead in __init__ if we need to circumvent # setattr hooks. - globs["_cached_setattr"] = _obj_setattr + globs["_cached_setattr_get"] = _obj_setattr.__get__ init = _make_method( "__attrs_init__" if attrs_init else "__init__", @@ -2189,7 +2076,7 @@ def _setattr(attr_name, value_var, has_on_setattr): """ Use the cached object.setattr to set *attr_name* to *value_var*. """ - return "_setattr('%s', %s)" % (attr_name, value_var) + return f"_setattr('{attr_name}', {value_var})" def _setattr_with_converter(attr_name, value_var, has_on_setattr): @@ -2212,7 +2099,7 @@ def _assign(attr_name, value, has_on_setattr): if has_on_setattr: return _setattr(attr_name, value, True) - return "self.%s = %s" % (attr_name, value) + return f"self.{attr_name} = {value}" def _assign_with_converter(attr_name, value_var, has_on_setattr): @@ -2230,63 +2117,6 @@ def _assign_with_converter(attr_name, value_var, has_on_setattr): ) -if PY2: - - def _unpack_kw_only_py2(attr_name, default=None): - """ - Unpack *attr_name* from _kw_only dict. - """ - if default is not None: - arg_default = ", %s" % default - else: - arg_default = "" - return "%s = _kw_only.pop('%s'%s)" % ( - attr_name, - attr_name, - arg_default, - ) - - def _unpack_kw_only_lines_py2(kw_only_args): - """ - Unpack all *kw_only_args* from _kw_only dict and handle errors. - - Given a list of strings "{attr_name}" and "{attr_name}={default}" - generates list of lines of code that pop attrs from _kw_only dict and - raise TypeError similar to builtin if required attr is missing or - extra key is passed. - - >>> print("\n".join(_unpack_kw_only_lines_py2(["a", "b=42"]))) - try: - a = _kw_only.pop('a') - b = _kw_only.pop('b', 42) - except KeyError as _key_error: - raise TypeError( - ... - if _kw_only: - raise TypeError( - ... - """ - lines = ["try:"] - lines.extend( - " " + _unpack_kw_only_py2(*arg.split("=")) - for arg in kw_only_args - ) - lines += """\ -except KeyError as _key_error: - raise TypeError( - '__init__() missing required keyword-only argument: %s' % _key_error - ) -if _kw_only: - raise TypeError( - '__init__() got an unexpected keyword argument %r' - % next(iter(_kw_only)) - ) -""".split( - "\n" - ) - return lines - - def _attrs_to_init_script( attrs, frozen, @@ -2317,7 +2147,7 @@ def _attrs_to_init_script( # Circumvent the __setattr__ descriptor to save one lookup per # assignment. 
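Per the 23.1.0 changelog note above, `fields` now resolves parametrized generics through their generic base. A sketch, assuming attrs >= 23.1; `Box` is illustrative:

from typing import Generic, TypeVar

import attr

T = TypeVar("T")

@attr.s(auto_attribs=True)
class Box(Generic[T]):
    content: T

print(attr.fields(Box[int]))  # works: resolved via the Box generic base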
# Note _setattr will be used again below if cache_hash is True - "_setattr = _cached_setattr.__get__(self, self.__class__)" + "_setattr = _cached_setattr_get(self)" ) if frozen is True: @@ -2335,7 +2165,7 @@ def fmt_setter(attr_name, value_var, has_on_setattr): if _is_slot_attr(attr_name, base_attr_map): return _setattr(attr_name, value_var, has_on_setattr) - return "_inst_dict['%s'] = %s" % (attr_name, value_var) + return f"_inst_dict['{attr_name}'] = {value_var}" def fmt_setter_with_converter( attr_name, value_var, has_on_setattr @@ -2373,7 +2203,9 @@ def fmt_setter_with_converter( has_on_setattr = a.on_setattr is not None or ( a.on_setattr is not setters.NO_OP and has_cls_on_setattr ) - arg_name = a.name.lstrip("_") + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias has_factory = isinstance(a.default, Factory) if has_factory and a.default.takes_self: @@ -2383,12 +2215,12 @@ def fmt_setter_with_converter( if a.init is False: if has_factory: - init_factory_name = _init_factory_pat.format(a.name) + init_factory_name = _init_factory_pat % (a.name,) if a.converter is not None: lines.append( fmt_setter_with_converter( attr_name, - init_factory_name + "(%s)" % (maybe_self,), + init_factory_name + f"({maybe_self})", has_on_setattr, ) ) @@ -2398,7 +2230,7 @@ def fmt_setter_with_converter( lines.append( fmt_setter( attr_name, - init_factory_name + "(%s)" % (maybe_self,), + init_factory_name + f"({maybe_self})", has_on_setattr, ) ) @@ -2408,7 +2240,7 @@ def fmt_setter_with_converter( lines.append( fmt_setter_with_converter( attr_name, - "attr_dict['%s'].default" % (attr_name,), + f"attr_dict['{attr_name}'].default", has_on_setattr, ) ) @@ -2418,12 +2250,12 @@ def fmt_setter_with_converter( lines.append( fmt_setter( attr_name, - "attr_dict['%s'].default" % (attr_name,), + f"attr_dict['{attr_name}'].default", has_on_setattr, ) ) elif a.default is not NOTHING and not has_factory: - arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name) + arg = f"{arg_name}=attr_dict['{attr_name}'].default" if a.kw_only: kw_only_args.append(arg) else: @@ -2442,14 +2274,14 @@ def fmt_setter_with_converter( lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) elif has_factory: - arg = "%s=NOTHING" % (arg_name,) + arg = f"{arg_name}=NOTHING" if a.kw_only: kw_only_args.append(arg) else: args.append(arg) - lines.append("if %s is not NOTHING:" % (arg_name,)) + lines.append(f"if {arg_name} is not NOTHING:") - init_factory_name = _init_factory_pat.format(a.name) + init_factory_name = _init_factory_pat % (a.name,) if a.converter is not None: lines.append( " " @@ -2504,21 +2336,11 @@ def fmt_setter_with_converter( if a.init is True: if a.type is not None and a.converter is None: annotations[arg_name] = a.type - elif a.converter is not None and not PY2: + elif a.converter is not None: # Try to get the type from the converter. - sig = None - try: - sig = inspect.signature(a.converter) - except (ValueError, TypeError): # inspect failed - pass - if sig: - sig_params = list(sig.parameters.values()) - if ( - sig_params - and sig_params[0].annotation - is not inspect.Parameter.empty - ): - annotations[arg_name] = sig_params[0].annotation + t = _AnnotationExtractor(a.converter).get_first_param_type() + if t: + annotations[arg_name] = t if attrs_to_validate: # we can skip this if there are no validators. 
names_for_globals["_config"] = _config @@ -2526,16 +2348,14 @@ def fmt_setter_with_converter( for a in attrs_to_validate: val_name = "__attr_validator_" + a.name attr_name = "__attr_" + a.name - lines.append( - " %s(self, %s, self.%s)" % (val_name, attr_name, a.name) - ) + lines.append(f" {val_name}(self, {attr_name}, self.{a.name})") names_for_globals[val_name] = a.validator names_for_globals[attr_name] = a if post_init: lines.append("self.__attrs_post_init__()") - # because this is set only after __attrs_post_init is called, a crash + # because this is set only after __attrs_post_init__ is called, a crash # will result if post-init tries to access the hash code. This seemed # preferable to setting this beforehand, in which case alteration to # field values during post-init combined with post-init accessing the @@ -2555,44 +2375,55 @@ def fmt_setter_with_converter( # For exceptions we rely on BaseException.__init__ for proper # initialization. if is_exc: - vals = ",".join("self." + a.name for a in attrs if a.init) + vals = ",".join(f"self.{a.name}" for a in attrs if a.init) - lines.append("BaseException.__init__(self, %s)" % (vals,)) + lines.append(f"BaseException.__init__(self, {vals})") args = ", ".join(args) if kw_only_args: - if PY2: - lines = _unpack_kw_only_lines_py2(kw_only_args) + lines + args += "%s*, %s" % ( + ", " if args else "", # leading comma + ", ".join(kw_only_args), # kw_only args + ) - args += "%s**_kw_only" % (", " if args else "",) # leading comma - else: - args += "%s*, %s" % ( - ", " if args else "", # leading comma - ", ".join(kw_only_args), # kw_only args - ) return ( - """\ -def {init_name}(self, {args}): - {lines} -""".format( - init_name=("__attrs_init__" if attrs_init else "__init__"), - args=args, - lines="\n ".join(lines) if lines else "pass", + "def %s(self, %s):\n %s\n" + % ( + ("__attrs_init__" if attrs_init else "__init__"), + args, + "\n ".join(lines) if lines else "pass", ), names_for_globals, annotations, ) -class Attribute(object): +def _default_init_alias_for(name: str) -> str: + """ + The default __init__ parameter name for a field. + + This performs private-name adjustment via leading-unscore stripping, + and is the default value of Attribute.alias if not provided. + """ + + return name.lstrip("_") + + +class Attribute: """ *Read-only* representation of an attribute. + .. warning:: + + You should never instantiate this class yourself. + The class has *all* arguments of `attr.ib` (except for ``factory`` which is only syntactic sugar for ``default=Factory(...)`` plus the following: - ``name`` (`str`): The name of the attribute. + - ``alias`` (`str`): The __init__ parameter name of the attribute, after + any explicit overrides and default private-attribute-name handling. - ``inherited`` (`bool`): Whether or not that attribute has been inherited from a base class. - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables @@ -2608,12 +2439,16 @@ class Attribute(object): - Validators get them passed as the first argument. - The :ref:`field transformer ` hook receives a list of them. + - The ``alias`` property exposes the __init__ parameter name of the field, + with any overrides and default private-attribute handling applied. + .. versionadded:: 20.1.0 *inherited* .. versionadded:: 20.1.0 *on_setattr* .. versionchanged:: 20.2.0 *inherited* is not taken into account for equality checks and hashing anymore. .. versionadded:: 21.1.0 *eq_key* and *order_key* + .. 
versionadded:: 22.2.0 *alias* For the full version history of the fields, see `attr.ib`. """ @@ -2635,6 +2470,7 @@ class Attribute(object): "kw_only", "inherited", "on_setattr", + "alias", ) def __init__( @@ -2656,13 +2492,14 @@ def __init__( order=None, order_key=None, on_setattr=None, + alias=None, ): eq, eq_key, order, order_key = _determine_attrib_eq_order( cmp, eq_key or eq, order_key or order, True ) # Cache this descriptor here to speed things up later. - bound_setattr = _obj_setattr.__get__(self, Attribute) + bound_setattr = _obj_setattr.__get__(self) # Despite the big red warning, people *do* instantiate `Attribute` # themselves. @@ -2680,7 +2517,7 @@ def __init__( bound_setattr( "metadata", ( - metadata_proxy(metadata) + types.MappingProxyType(dict(metadata)) # Shallow copy if metadata else _empty_metadata_singleton ), @@ -2689,6 +2526,7 @@ def __init__( bound_setattr("kw_only", kw_only) bound_setattr("inherited", inherited) bound_setattr("on_setattr", on_setattr) + bound_setattr("alias", alias) def __setattr__(self, name, value): raise FrozenInstanceError() @@ -2721,25 +2559,16 @@ def from_counting_attr(cls, name, ca, type=None): type=type, cmp=None, inherited=False, - **inst_dict + **inst_dict, ) - @property - def cmp(self): - """ - Simulate the presence of a cmp attribute and warn. - """ - warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2) - - return self.eq and self.order - - # Don't use attr.evolve since fields(Attribute) doesn't work + # Don't use attrs.evolve since fields(Attribute) doesn't work def evolve(self, **changes): """ Copy *self* and apply *changes*. - This works similarly to `attr.evolve` but that function does not work - with ``Attribute``. + This works similarly to `attrs.evolve` but that function does not work + with `Attribute`. It is mainly meant to be used for `transform-fields`. @@ -2768,14 +2597,14 @@ def __setstate__(self, state): self._setattrs(zip(self.__slots__, state)) def _setattrs(self, name_values_pairs): - bound_setattr = _obj_setattr.__get__(self, Attribute) + bound_setattr = _obj_setattr.__get__(self) for name, value in name_values_pairs: if name != "metadata": bound_setattr(name, value) else: bound_setattr( name, - metadata_proxy(value) + types.MappingProxyType(dict(value)) if value else _empty_metadata_singleton, ) @@ -2793,6 +2622,7 @@ def _setattrs(self, name_values_pairs): hash=(name != "metadata"), init=True, inherited=False, + alias=_default_init_alias_for(name), ) for name in Attribute.__slots__ ] @@ -2806,7 +2636,7 @@ def _setattrs(self, name_values_pairs): ) -class _CountingAttr(object): +class _CountingAttr: """ Intermediate representation of attributes that uses a counter to preserve the order in which the attributes have been defined. 
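For reference, the `alias` machinery introduced above (`_default_init_alias_for` plus the new `Attribute.alias` slot) determines the `__init__` parameter name of every field: a leading-underscore-stripped default, or an explicit override. A minimal sketch of the resulting behavior, assuming the vendored attrs at 22.2.0 or later; the `Connection` class and its fields are illustrative only and not part of this patch:

```python
import attrs

@attrs.define
class Connection:
    # Default alias: the leading underscore is stripped, so the generated
    # __init__ accepts `host`, not `_host`.
    _host: str
    # Explicit alias: the __init__ parameter is renamed to `tcp_port`,
    # while the attribute itself stays `port`.
    port: int = attrs.field(alias="tcp_port", default=443)

conn = Connection(host="example.com", tcp_port=8443)
assert conn._host == "example.com" and conn.port == 8443
# The derived parameter names are exposed on the field metadata:
assert [f.alias for f in attrs.fields(Connection)] == ["host", "tcp_port"]
```

This matches the rule in the hunk above: `name.lstrip("_")` unless an explicit alias was given.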
@@ -2831,10 +2661,12 @@ class _CountingAttr(object): "type", "kw_only", "on_setattr", + "alias", ) __attrs_attrs__ = tuple( Attribute( name=name, + alias=_default_init_alias_for(name), default=NOTHING, validator=None, repr=True, @@ -2858,10 +2690,12 @@ class _CountingAttr(object): "hash", "init", "on_setattr", + "alias", ) ) + ( Attribute( name="metadata", + alias="metadata", default=None, validator=None, repr=True, @@ -2896,6 +2730,7 @@ def __init__( order, order_key, on_setattr, + alias, ): _CountingAttr.cls_counter += 1 self.counter = _CountingAttr.cls_counter @@ -2913,6 +2748,7 @@ def __init__( self.type = type self.kw_only = kw_only self.on_setattr = on_setattr + self.alias = alias def validator(self, meth): """ @@ -2949,7 +2785,7 @@ def default(self, meth): _CountingAttr = _add_eq(_add_repr(_CountingAttr)) -class Factory(object): +class Factory: """ Stores a factory callable. @@ -2967,10 +2803,6 @@ class Factory(object): __slots__ = ("factory", "takes_self") def __init__(self, factory, takes_self=False): - """ - `Factory` is part of the default machinery so if we want a default - value here, we have to implement it ourselves. - """ self.factory = factory self.takes_self = takes_self @@ -3008,18 +2840,17 @@ def __setstate__(self, state): def make_class(name, attrs, bases=(object,), **attributes_arguments): - """ + r""" A quick way to create a new class called *name* with *attrs*. :param str name: The name for the new class. :param attrs: A list of names or a dictionary of mappings of names to - attributes. + `attr.ib`\ s / `attrs.field`\ s. - If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, - `collections.OrderedDict` otherwise), the order is deduced from - the order of the names or attributes inside *attrs*. Otherwise the - order of the definition of the attributes is used. + The order is deduced from the order of the names or attributes inside + *attrs*. Otherwise the order of the definition of the attributes is + used. :type attrs: `list` or `dict` :param tuple bases: Classes that the new class will subclass. @@ -3035,7 +2866,7 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): if isinstance(attrs, dict): cls_dict = attrs elif isinstance(attrs, (list, tuple)): - cls_dict = dict((a, attrib()) for a in attrs) + cls_dict = {a: attrib() for a in attrs} else: raise TypeError("attrs argument must be a dict or a list.") @@ -3051,7 +2882,7 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): if user_init is not None: body["__init__"] = user_init - type_ = new_class(name, bases, {}, lambda ns: ns.update(body)) + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) # For pickling to work, the __module__ variable needs to be set to the # frame where the class is created. Bypass this step in environments where @@ -3084,7 +2915,7 @@ def make_class(name, attrs, bases=(object,), **attributes_arguments): @attrs(slots=True, hash=True) -class _AndValidator(object): +class _AndValidator: """ Compose many validators to a single one. """ @@ -3138,36 +2969,19 @@ def pipe_converter(val): return val - if not PY2: - if not converters: - # If the converter list is empty, pipe_converter is the identity. - A = typing.TypeVar("A") - pipe_converter.__annotations__ = {"val": A, "return": A} - else: - # Get parameter type. 
-            sig = None
-            try:
-                sig = inspect.signature(converters[0])
-            except (ValueError, TypeError):  # inspect failed
-                pass
-            if sig:
-                params = list(sig.parameters.values())
-                if (
-                    params
-                    and params[0].annotation is not inspect.Parameter.empty
-                ):
-                    pipe_converter.__annotations__["val"] = params[
-                        0
-                    ].annotation
-            # Get return type.
-            sig = None
-            try:
-                sig = inspect.signature(converters[-1])
-            except (ValueError, TypeError):  # inspect failed
-                pass
-            if sig and sig.return_annotation is not inspect.Signature().empty:
-                pipe_converter.__annotations__[
-                    "return"
-                ] = sig.return_annotation
+    if not converters:
+        # If the converter list is empty, pipe_converter is the identity.
+        A = typing.TypeVar("A")
+        pipe_converter.__annotations__ = {"val": A, "return": A}
+    else:
+        # Get parameter type from first converter.
+        t = _AnnotationExtractor(converters[0]).get_first_param_type()
+        if t:
+            pipe_converter.__annotations__["val"] = t
+
+        # Get return type from last converter.
+        rt = _AnnotationExtractor(converters[-1]).get_return_type()
+        if rt:
+            pipe_converter.__annotations__["return"] = rt
 
     return pipe_converter
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_next_gen.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_next_gen.py
old mode 100755
new mode 100644
index 0682536..8f7c0b9
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_next_gen.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_next_gen.py
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: MIT
 
 """
-These are Python 3.6+-only and keyword-only APIs that call `attr.s` and
-`attr.ib` with different default values.
+These are keyword-only APIs that call `attr.s` and `attr.ib` with different
+default values.
 """
 
 
@@ -26,6 +26,7 @@ def define(
     *,
     these=None,
     repr=None,
+    unsafe_hash=None,
     hash=None,
     init=None,
     slots=True,
@@ -45,20 +46,24 @@ def define(
     match_args=True,
 ):
     r"""
-    Define an ``attrs`` class.
+    Define an *attrs* class.
 
     Differences to the classic `attr.s` that it uses underneath:
 
-    - Automatically detect whether or not *auto_attribs* should be `True`
-      (c.f. *auto_attribs* parameter).
+    - Automatically detect whether or not *auto_attribs* should be `True` (c.f.
+      *auto_attribs* parameter).
     - If *frozen* is `False`, run converters and validators when setting an
       attribute by default.
-    - *slots=True* (see :term:`slotted classes` for potentially surprising
-      behaviors)
+    - *slots=True*
+
+      .. caution::
+
+         Usually this has only upsides and few visible effects in everyday
+         programming. But it *can* lead to some surprising behaviors, so please
+         make sure to read :term:`slotted classes`.
     - *auto_exc=True*
     - *auto_detect=True*
     - *order=False*
-    - *match_args=True*
 
     Some options that were only relevant on Python 2 or were kept around for
     backwards-compatibility have been removed.
 
@@ -77,6 +82,8 @@ def define(
 
     .. versionadded:: 20.1.0
     .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
+    .. versionadded:: 22.2.0
+       *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
""" def do_it(cls, auto_attribs): @@ -85,6 +92,7 @@ def do_it(cls, auto_attribs): these=these, repr=repr, hash=hash, + unsafe_hash=unsafe_hash, init=init, slots=slots, frozen=frozen, @@ -159,17 +167,23 @@ def field( hash=None, init=True, metadata=None, + type=None, converter=None, factory=None, kw_only=False, eq=None, order=None, on_setattr=None, + alias=None, ): """ Identical to `attr.ib`, except keyword-only and with some arguments removed. + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for + {func}`attrs.make_class`. Please note that type checkers ignore this + metadata. .. versionadded:: 20.1.0 """ return attrib( @@ -179,12 +193,14 @@ def field( hash=hash, init=init, metadata=metadata, + type=type, converter=converter, factory=factory, kw_only=kw_only, eq=eq, order=order, on_setattr=on_setattr, + alias=alias, ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_typing_compat.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_typing_compat.pyi new file mode 100644 index 0000000..ca7b71e --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_typing_compat.pyi @@ -0,0 +1,15 @@ +from typing import Any, ClassVar, Protocol + +# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. +MYPY = False + +if MYPY: + # A protocol to be able to statically accept an attrs class. + class AttrsInstance_(Protocol): + __attrs_attrs__: ClassVar[Any] + +else: + # For type checkers without plug-in support use an empty protocol that + # will (hopefully) be combined into a union. + class AttrsInstance_(Protocol): + pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_version_info.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_version_info.py old mode 100755 new mode 100644 index cdaeec3..51a1312 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_version_info.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/_version_info.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: MIT -from __future__ import absolute_import, division, print_function from functools import total_ordering @@ -10,7 +9,7 @@ @total_ordering @attrs(eq=False, order=False, slots=True, frozen=True) -class VersionInfo(object): +class VersionInfo: """ A version object that can be compared to tuple of length 1--4: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.py old mode 100755 new mode 100644 index 1fb6c05..4cada10 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.py @@ -4,15 +4,11 @@ Commonly useful converters. 
""" -from __future__ import absolute_import, division, print_function - -from ._compat import PY2 -from ._make import NOTHING, Factory, pipe +import typing -if not PY2: - import inspect - import typing +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Factory, pipe __all__ = [ @@ -42,22 +38,15 @@ def optional_converter(val): return None return converter(val) - if not PY2: - sig = None - try: - sig = inspect.signature(converter) - except (ValueError, TypeError): # inspect failed - pass - if sig: - params = list(sig.parameters.values()) - if params and params[0].annotation is not inspect.Parameter.empty: - optional_converter.__annotations__["val"] = typing.Optional[ - params[0].annotation - ] - if sig.return_annotation is not inspect.Signature.empty: - optional_converter.__annotations__["return"] = typing.Optional[ - sig.return_annotation - ] + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] return optional_converter @@ -152,4 +141,4 @@ def to_bool(val): except TypeError: # Raised when "val" is not hashable (e.g., lists) pass - raise ValueError("Cannot convert value to bool: {}".format(val)) + raise ValueError(f"Cannot convert value to bool: {val}") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.pyi index 0f58088..5abb49f 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.pyi +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/converters.pyi @@ -1,4 +1,4 @@ -from typing import Callable, Optional, TypeVar, overload +from typing import Callable, TypeVar, overload from . import _ConverterType diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/exceptions.py old mode 100755 new mode 100644 index b2f1edc..2883493 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/exceptions.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/exceptions.py @@ -1,7 +1,5 @@ # SPDX-License-Identifier: MIT -from __future__ import absolute_import, division, print_function - class FrozenError(AttributeError): """ @@ -36,7 +34,7 @@ class FrozenAttributeError(FrozenError): class AttrsAttributeNotFoundError(ValueError): """ - An ``attrs`` function couldn't find an attribute that the user asked for. + An *attrs* function couldn't find an attribute that the user asked for. .. versionadded:: 16.2.0 """ @@ -44,7 +42,7 @@ class AttrsAttributeNotFoundError(ValueError): class NotAnAttrsClassError(ValueError): """ - A non-``attrs`` class has been passed into an ``attrs`` function. + A non-*attrs* class has been passed into an *attrs* function. .. versionadded:: 16.2.0 """ @@ -52,7 +50,7 @@ class NotAnAttrsClassError(ValueError): class DefaultAlreadySetError(RuntimeError): """ - A default has been set using ``attr.ib()`` and is attempted to be reset + A default has been set when defining the field and is attempted to be reset using the decorator. .. 
versionadded:: 17.1.0 @@ -61,8 +59,7 @@ class DefaultAlreadySetError(RuntimeError): class UnannotatedAttributeError(RuntimeError): """ - A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type - annotation. + A class with ``auto_attribs=True`` has a field without a type annotation. .. versionadded:: 17.3.0 """ @@ -70,7 +67,7 @@ class UnannotatedAttributeError(RuntimeError): class PythonTooOldError(RuntimeError): """ - It was attempted to use an ``attrs`` feature that requires a newer Python + It was attempted to use an *attrs* feature that requires a newer Python version. .. versionadded:: 18.2.0 @@ -79,8 +76,8 @@ class PythonTooOldError(RuntimeError): class NotCallableError(TypeError): """ - A ``attr.ib()`` requiring a callable has been set with a value - that is not callable. + A field requiring a callable has been set with a value that is not + callable. .. versionadded:: 19.2.0 """ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.py old mode 100755 new mode 100644 index a1978a8..a1e40c9 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.py @@ -4,9 +4,6 @@ Commonly useful filters for `attr.asdict`. """ -from __future__ import absolute_import, division, print_function - -from ._compat import isclass from ._make import Attribute @@ -15,7 +12,8 @@ def _split_what(what): Returns a tuple of `frozenset`s of classes and attributes. """ return ( - frozenset(cls for cls in what if isclass(cls)), + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, str)), frozenset(cls for cls in what if isinstance(cls, Attribute)), ) @@ -25,14 +23,21 @@ def include(*what): Include *what*. :param what: What to include. - :type what: `list` of `type` or `attrs.Attribute`\\ s + :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s :rtype: `callable` + + .. versionchanged:: 23.1.0 Accept strings with field names. """ - cls, attrs = _split_what(what) + cls, names, attrs = _split_what(what) def include_(attribute, value): - return value.__class__ in cls or attribute in attrs + return ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) return include_ @@ -42,13 +47,20 @@ def exclude(*what): Exclude *what*. :param what: What to exclude. - :type what: `list` of classes or `attrs.Attribute`\\ s. + :type what: `list` of classes `type`, field names `str` or + `attrs.Attribute`\\ s. :rtype: `callable` + + .. versionchanged:: 23.3.0 Accept field name string as input argument """ - cls, attrs = _split_what(what) + cls, names, attrs = _split_what(what) def exclude_(attribute, value): - return value.__class__ not in cls and attribute not in attrs + return not ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) return exclude_ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.pyi index 9938668..8a02fa0 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.pyi +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/filters.pyi @@ -2,5 +2,5 @@ from typing import Any, Union from . 
import Attribute, _FilterType -def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... -def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... +def include(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... +def exclude(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.py old mode 100755 new mode 100644 index b1cbb5d..12ed675 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.py @@ -4,7 +4,6 @@ Commonly used hooks for on_setattr. """ -from __future__ import absolute_import, division, print_function from . import _config from .exceptions import FrozenAttributeError @@ -69,11 +68,6 @@ def convert(instance, attrib, new_value): return new_value +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# autodata stopped working, so the docstring is inlined in the API docs. NO_OP = object() -""" -Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. - -Does not work in `pipe` or within lists. - -.. versionadded:: 20.1.0 -""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.pyi index 3f5603c..72f7ce4 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.pyi +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/setters.pyi @@ -1,4 +1,4 @@ -from typing import Any, NewType, NoReturn, TypeVar, cast +from typing import Any, NewType, NoReturn, TypeVar from . import Attribute, _OnSetAttrType diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.py old mode 100755 new mode 100644 index 0b0c834..1488554 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.py @@ -4,24 +4,19 @@ Commonly useful validators. """ -from __future__ import absolute_import, division, print_function import operator import re from contextlib import contextmanager +from re import Pattern from ._config import get_run_validators, set_run_validators from ._make import _AndValidator, and_, attrib, attrs +from .converters import default_if_none from .exceptions import NotCallableError -try: - Pattern = re.Pattern -except AttributeError: # Python <3.7 lacks a Pattern type. - Pattern = type(re.compile("")) - - __all__ = [ "and_", "deep_iterable", @@ -37,6 +32,8 @@ "lt", "matches_re", "max_len", + "min_len", + "not_", "optional", "provides", "set_disabled", @@ -92,7 +89,7 @@ def disabled(): @attrs(repr=False, slots=True, hash=True) -class _InstanceOfValidator(object): +class _InstanceOfValidator: type = attrib() def __call__(self, inst, attr, value): @@ -126,7 +123,7 @@ def instance_of(type): `isinstance` therefore it's also valid to pass a tuple of types). :param type: The type to check for. 
- :type type: type or tuple of types + :type type: type or tuple of type :raises TypeError: With a human readable error message, the attribute (of type `attrs.Attribute`), the expected type, and the value it @@ -136,7 +133,7 @@ def instance_of(type): @attrs(repr=False, frozen=True, slots=True) -class _MatchesReValidator(object): +class _MatchesReValidator: pattern = attrib() match_func = attrib() @@ -169,17 +166,15 @@ def matches_re(regex, flags=0, func=None): :param regex: a regex string or precompiled pattern to match against :param int flags: flags that will be passed to the underlying re function (default 0) - :param callable func: which underlying `re` function to call (options - are `re.fullmatch`, `re.search`, `re.match`, default - is ``None`` which means either `re.fullmatch` or an emulation of - it on Python 2). For performance reasons, they won't be used directly - but on a pre-`re.compile`\ ed pattern. + :param callable func: which underlying `re` function to call. Valid options + are `re.fullmatch`, `re.search`, and `re.match`; the default ``None`` + means `re.fullmatch`. For performance reasons, the pattern is always + precompiled using `re.compile`. .. versionadded:: 19.2.0 .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern. """ - fullmatch = getattr(re, "fullmatch", None) - valid_funcs = (fullmatch, None, re.search, re.match) + valid_funcs = (re.fullmatch, None, re.search, re.match) if func not in valid_funcs: raise ValueError( "'func' must be one of {}.".format( @@ -205,19 +200,14 @@ def matches_re(regex, flags=0, func=None): match_func = pattern.match elif func is re.search: match_func = pattern.search - elif fullmatch: + else: match_func = pattern.fullmatch - else: # Python 2 fullmatch emulation (https://bugs.python.org/issue16203) - pattern = re.compile( - r"(?:{})\Z".format(pattern.pattern), pattern.flags - ) - match_func = pattern.match return _MatchesReValidator(pattern, match_func) @attrs(repr=False, slots=True, hash=True) -class _ProvidesValidator(object): +class _ProvidesValidator: interface = attrib() def __call__(self, inst, attr, value): @@ -254,12 +244,22 @@ def provides(interface): :raises TypeError: With a human readable error message, the attribute (of type `attrs.Attribute`), the expected interface, and the value it got. + + .. deprecated:: 23.1.0 """ + import warnings + + warnings.warn( + "attrs's zope-interface support is deprecated and will be removed in, " + "or after, April 2024.", + DeprecationWarning, + stacklevel=2, + ) return _ProvidesValidator(interface) @attrs(repr=False, slots=True, hash=True) -class _OptionalValidator(object): +class _OptionalValidator: validator = attrib() def __call__(self, inst, attr, value): @@ -280,20 +280,21 @@ def optional(validator): which can be set to ``None`` in addition to satisfying the requirements of the sub-validator. - :param validator: A validator (or a list of validators) that is used for - non-``None`` values. - :type validator: callable or `list` of callables. + :param Callable | tuple[Callable] | list[Callable] validator: A validator + (or validators) that is used for non-``None`` values. .. versionadded:: 15.1.0 .. versionchanged:: 17.1.0 *validator* can be a list of validators. + .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators. 
""" - if isinstance(validator, list): + if isinstance(validator, (list, tuple)): return _OptionalValidator(_AndValidator(validator)) + return _OptionalValidator(validator) @attrs(repr=False, slots=True, hash=True) -class _InValidator(object): +class _InValidator: options = attrib() def __call__(self, inst, attr, value): @@ -306,7 +307,10 @@ def __call__(self, inst, attr, value): raise ValueError( "'{name}' must be in {options!r} (got {value!r})".format( name=attr.name, options=self.options, value=value - ) + ), + attr, + self.options, + value, ) def __repr__(self): @@ -329,12 +333,16 @@ def in_(options): got. .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. """ return _InValidator(options) @attrs(repr=False, slots=False, hash=True) -class _IsCallableValidator(object): +class _IsCallableValidator: def __call__(self, inst, attr, value): """ We use a callable class to be able to change the ``__repr__``. @@ -357,13 +365,13 @@ def __repr__(self): def is_callable(): """ - A validator that raises a `attr.exceptions.NotCallableError` if the + A validator that raises a `attrs.exceptions.NotCallableError` if the initializer is called with a value for this particular attribute that is not callable. .. versionadded:: 19.1.0 - :raises `attr.exceptions.NotCallableError`: With a human readable error + :raises attrs.exceptions.NotCallableError: With a human readable error message containing the attribute (`attrs.Attribute`) name, and the value it got. """ @@ -371,7 +379,7 @@ def is_callable(): @attrs(repr=False, slots=True, hash=True) -class _DeepIterable(object): +class _DeepIterable: member_validator = attrib(validator=is_callable()) iterable_validator = attrib( default=None, validator=optional(is_callable()) @@ -391,7 +399,7 @@ def __repr__(self): iterable_identifier = ( "" if self.iterable_validator is None - else " {iterable!r}".format(iterable=self.iterable_validator) + else f" {self.iterable_validator!r}" ) return ( "".format(max=self.max_length) + return f"" def max_len(length): @@ -559,3 +569,152 @@ def max_len(length): .. versionadded:: 21.3.0 """ return _MaxLengthValidator(length) + + +@attrs(repr=False, frozen=True, slots=True) +class _MinLengthValidator: + min_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) < self.min_length: + raise ValueError( + "Length of '{name}' must be => {min}: {len}".format( + name=attr.name, min=self.min_length, len=len(value) + ) + ) + + def __repr__(self): + return f"" + + +def min_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is shorter than *length*. + + :param int length: Minimum length of the string or iterable + + .. versionadded:: 22.1.0 + """ + return _MinLengthValidator(length) + + +@attrs(repr=False, slots=True, hash=True) +class _SubclassOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. 
+        """
+        if not issubclass(value, self.type):
+            raise TypeError(
+                "'{name}' must be a subclass of {type!r} "
+                "(got {value!r}).".format(
+                    name=attr.name,
+                    type=self.type,
+                    value=value,
+                ),
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return "<subclass_of validator for type {type!r}>".format(
+            type=self.type
+        )
+
+
+def _subclass_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed using
+    `issubclass` therefore it's also valid to pass a tuple of types).
+
+    :param type: The type to check for.
+    :type type: type or tuple of types
+
+    :raises TypeError: With a human readable error message, the attribute
+        (of type `attrs.Attribute`), the expected type, and the value it
+        got.
+    """
+    return _SubclassOfValidator(type)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _NotValidator:
+    validator = attrib()
+    msg = attrib(
+        converter=default_if_none(
+            "not_ validator child '{validator!r}' "
+            "did not raise a captured error"
+        )
+    )
+    exc_types = attrib(
+        validator=deep_iterable(
+            member_validator=_subclass_of(Exception),
+            iterable_validator=instance_of(tuple),
+        ),
+    )
+
+    def __call__(self, inst, attr, value):
+        try:
+            self.validator(inst, attr, value)
+        except self.exc_types:
+            pass  # suppress error to invert validity
+        else:
+            raise ValueError(
+                self.msg.format(
+                    validator=self.validator,
+                    exc_types=self.exc_types,
+                ),
+                attr,
+                self.validator,
+                value,
+                self.exc_types,
+            )
+
+    def __repr__(self):
+        return (
+            "<not_ validator wrapping {what!r}, capturing {exc_types!r}>"
+        ).format(
+            what=self.validator,
+            exc_types=self.exc_types,
+        )
+
+
+def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)):
+    """
+    A validator that wraps and logically 'inverts' the validator passed to it.
+    It will raise a `ValueError` if the provided validator *doesn't* raise a
+    `ValueError` or `TypeError` (by default), and will suppress the exception
+    if the provided validator *does*.
+
+    Intended to be used with existing validators to compose logic without
+    needing to create inverted variants, for example, ``not_(in_(...))``.
+
+    :param validator: A validator to be logically inverted.
+    :param msg: Message to raise if validator fails.
+        Formatted with keys ``exc_types`` and ``validator``.
+    :type msg: str
+    :param exc_types: Exception type(s) to capture.
+        Other types raised by child validators will not be intercepted and
+        pass through.
+
+    :raises ValueError: With a human readable error message,
+        the attribute (of type `attrs.Attribute`),
+        the validator that failed to raise an exception,
+        the value it got,
+        and the expected exception types.
+
+    .. versionadded:: 22.2.0
+    """
+    try:
+        exc_types = tuple(exc_types)
+    except TypeError:
+        exc_types = (exc_types,)
+    return _NotValidator(validator, msg, exc_types)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.pyi
index 5e00b85..d194a75 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.pyi
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attr/validators.pyi
@@ -18,6 +18,7 @@ from typing import (
 )
 
 from . import _ValidatorType
+from . import _ValidatorArgType
 
 _T = TypeVar("_T")
 _T1 = TypeVar("_T1")
@@ -50,7 +51,9 @@ def instance_of(
 def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ...
 def provides(interface: Any) -> _ValidatorType[Any]: ...
def optional( - validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]] + validator: Union[ + _ValidatorType[_T], List[_ValidatorType[_T]], Tuple[_ValidatorType[_T]] + ] ) -> _ValidatorType[Optional[_T]]: ... def in_(options: Container[_T]) -> _ValidatorType[_T]: ... def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... @@ -62,7 +65,7 @@ def matches_re( ] = ..., ) -> _ValidatorType[AnyStr]: ... def deep_iterable( - member_validator: _ValidatorType[_T], + member_validator: _ValidatorArgType[_T], iterable_validator: Optional[_ValidatorType[_I]] = ..., ) -> _ValidatorType[_I]: ... def deep_mapping( @@ -76,3 +79,10 @@ def le(val: _T) -> _ValidatorType[_T]: ... def ge(val: _T) -> _ValidatorType[_T]: ... def gt(val: _T) -> _ValidatorType[_T]: ... def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... +def not_( + validator: _ValidatorType[_T], + *, + msg: Optional[str] = None, + exc_types: Union[Type[Exception], Iterable[Type[Exception]]] = ..., +) -> _ValidatorType[_T]: ... diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/AUTHORS.rst b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/AUTHORS.rst deleted file mode 100644 index f14ef6c..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/AUTHORS.rst +++ /dev/null @@ -1,11 +0,0 @@ -Credits -======= - -``attrs`` is written and maintained by `Hynek Schlawack `_. - -The development is kindly supported by `Variomedia AG `_. - -A full list of contributors can be found in `GitHub's overview `_. - -It’s the spiritual successor of `characteristic `_ and aspires to fix some of it clunkiness and unfortunate decisions. -Both were inspired by Twisted’s `FancyEqMixin `_ but both are implemented using class decorators because `subclassing is bad for you `_, m’kay? 
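The validator surface grown in the `attr/validators.py` hunks above (`min_len`, `not_`, and `optional()` accepting a tuple of validators) composes as follows. A minimal sketch, assuming the vendored attrs 23.1.0 is importable; the `User` class is illustrative only and not part of this patch:

```python
from typing import Optional

import attrs
from attrs import validators as v

@attrs.define
class User:
    # min_len() was added in attrs 22.1.0.
    name: str = attrs.field(validator=v.min_len(1))
    # not_() (added in 22.2.0) inverts a validator: reject role == "root".
    role: str = attrs.field(validator=v.not_(v.in_({"root"})))
    # optional() accepts a tuple of validators as of 23.1.0.
    nickname: Optional[str] = attrs.field(
        default=None,
        validator=v.optional((v.instance_of(str), v.min_len(2))),
    )

User(name="amy", role="admin")    # passes every validator
# User(name="amy", role="root")   # would raise ValueError via not_(in_(...))
```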
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/METADATA deleted file mode 100644 index aa327d5..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/METADATA +++ /dev/null @@ -1,232 +0,0 @@ -Metadata-Version: 2.1 -Name: attrs -Version: 21.4.0 -Summary: Classes Without Boilerplate -Home-page: https://www.attrs.org/ -Author: Hynek Schlawack -Author-email: hs@ox.cx -Maintainer: Hynek Schlawack -Maintainer-email: hs@ox.cx -License: MIT -Project-URL: Documentation, https://www.attrs.org/ -Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html -Project-URL: Bug Tracker, https://github.com/python-attrs/attrs/issues -Project-URL: Source Code, https://github.com/python-attrs/attrs -Project-URL: Funding, https://github.com/sponsors/hynek -Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi -Project-URL: Ko-fi, https://ko-fi.com/the_hynek -Keywords: class,attribute,boilerplate -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: Natural Language :: English -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* -Description-Content-Type: text/x-rst -License-File: LICENSE -License-File: AUTHORS.rst -Provides-Extra: dev -Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'dev' -Requires-Dist: hypothesis ; extra == 'dev' -Requires-Dist: pympler ; extra == 'dev' -Requires-Dist: pytest (>=4.3.0) ; extra == 'dev' -Requires-Dist: six ; extra == 'dev' -Requires-Dist: mypy ; extra == 'dev' -Requires-Dist: pytest-mypy-plugins ; extra == 'dev' -Requires-Dist: zope.interface ; extra == 'dev' -Requires-Dist: furo ; extra == 'dev' -Requires-Dist: sphinx ; extra == 'dev' -Requires-Dist: sphinx-notfound-page ; extra == 'dev' -Requires-Dist: pre-commit ; extra == 'dev' -Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'dev' -Provides-Extra: docs -Requires-Dist: furo ; extra == 'docs' -Requires-Dist: sphinx ; extra == 'docs' -Requires-Dist: zope.interface ; extra == 'docs' -Requires-Dist: sphinx-notfound-page ; extra == 'docs' -Provides-Extra: tests -Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'tests' -Requires-Dist: hypothesis ; extra == 'tests' -Requires-Dist: pympler ; extra == 'tests' -Requires-Dist: pytest (>=4.3.0) ; extra == 'tests' -Requires-Dist: six ; extra == 'tests' -Requires-Dist: mypy ; extra == 'tests' -Requires-Dist: pytest-mypy-plugins ; extra == 'tests' -Requires-Dist: zope.interface ; extra == 
'tests' -Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'tests' -Provides-Extra: tests_no_zope -Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'tests_no_zope' -Requires-Dist: hypothesis ; extra == 'tests_no_zope' -Requires-Dist: pympler ; extra == 'tests_no_zope' -Requires-Dist: pytest (>=4.3.0) ; extra == 'tests_no_zope' -Requires-Dist: six ; extra == 'tests_no_zope' -Requires-Dist: mypy ; extra == 'tests_no_zope' -Requires-Dist: pytest-mypy-plugins ; extra == 'tests_no_zope' -Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'tests_no_zope' - - -.. image:: https://www.attrs.org/en/stable/_static/attrs_logo.png - :alt: attrs logo - :align: center - - -``attrs`` is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka `dunder methods `_). -`Trusted by NASA `_ for Mars missions since 2020! - -Its main goal is to help you to write **concise** and **correct** software without slowing down your code. - -.. teaser-end - -For that, it gives you a class decorator and a way to declaratively define the attributes on that class: - -.. -code-begin- - -.. code-block:: pycon - - >>> from attrs import asdict, define, make_class, Factory - - >>> @define - ... class SomeClass: - ... a_number: int = 42 - ... list_of_numbers: list[int] = Factory(list) - ... - ... def hard_math(self, another_number): - ... return self.a_number + sum(self.list_of_numbers) * another_number - - - >>> sc = SomeClass(1, [1, 2, 3]) - >>> sc - SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) - - >>> sc.hard_math(3) - 19 - >>> sc == SomeClass(1, [1, 2, 3]) - True - >>> sc != SomeClass(2, [3, 2, 1]) - True - - >>> asdict(sc) - {'a_number': 1, 'list_of_numbers': [1, 2, 3]} - - >>> SomeClass() - SomeClass(a_number=42, list_of_numbers=[]) - - >>> C = make_class("C", ["a", "b"]) - >>> C("foo", "bar") - C(a='foo', b='bar') - - -After *declaring* your attributes ``attrs`` gives you: - -- a concise and explicit overview of the class's attributes, -- a nice human-readable ``__repr__``, -- a equality-checking methods, -- an initializer, -- and much more, - -*without* writing dull boilerplate code again and again and *without* runtime performance penalties. - -**Hate type annotations**!? -No problem! -Types are entirely **optional** with ``attrs``. -Simply assign ``attrs.field()`` to the attributes instead of annotating them with types. - ----- - -This example uses ``attrs``'s modern APIs that have been introduced in version 20.1.0, and the ``attrs`` package import name that has been added in version 21.3.0. -The classic APIs (``@attr.s``, ``attr.ib``, plus their serious business aliases) and the ``attr`` package import name will remain **indefinitely**. - -Please check out `On The Core API Names `_ for a more in-depth explanation. - - -Data Classes -============ - -On the tin, ``attrs`` might remind you of ``dataclasses`` (and indeed, ``dataclasses`` are a descendant of ``attrs``). -In practice it does a lot more and is more flexible. -For instance it allows you to define `special handling of NumPy arrays for equality checks `_, or allows more ways to `plug into the initialization process `_. - -For more details, please refer to our `comparison page `_. - - -.. -getting-help- - -Getting Help -============ - -Please use the ``python-attrs`` tag on `Stack Overflow `_ to get help. - -Answering questions of your fellow developers is also a great way to help the project! 
- - -.. -project-information- - -Project Information -=================== - -``attrs`` is released under the `MIT `_ license, -its documentation lives at `Read the Docs `_, -the code on `GitHub `_, -and the latest release on `PyPI `_. -It’s rigorously tested on Python 2.7, 3.5+, and PyPy. - -We collect information on **third-party extensions** in our `wiki `_. -Feel free to browse and add your own! - -If you'd like to contribute to ``attrs`` you're most welcome and we've written `a little guide `_ to get you started! - - -``attrs`` for Enterprise ------------------------- - -Available as part of the Tidelift Subscription. - -The maintainers of ``attrs`` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. -Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. -`Learn more. `_ - - -Release Information -=================== - -21.4.0 (2021-12-29) -------------------- - -Changes -^^^^^^^ - -- Fixed the test suite on PyPy3.8 where ``cloudpickle`` does not work. - `#892 `_ -- Fixed ``coverage report`` for projects that use ``attrs`` and don't set a ``--source``. - `#895 `_, - `#896 `_ - -`Full changelog `_. - -Credits -======= - -``attrs`` is written and maintained by `Hynek Schlawack `_. - -The development is kindly supported by `Variomedia AG `_. - -A full list of contributors can be found in `GitHub's overview `_. - -It’s the spiritual successor of `characteristic `_ and aspires to fix some of it clunkiness and unfortunate decisions. -Both were inspired by Twisted’s `FancyEqMixin `_ but both are implemented using class decorators because `subclassing is bad for you `_, m’kay? - - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/RECORD deleted file mode 100644 index 64b3a6d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/RECORD +++ /dev/null @@ -1,37 +0,0 @@ -attr/__init__.py,sha256=_zhJ4O8Q5KR5gaIrjX73vkR5nA6NjfpMGXQChEdNljI,1667 -attr/__init__.pyi,sha256=ubRkstoRHPpQN17iA0OCh8waIwZ5NeJgbz0lwI8XUjY,15100 -attr/_cmp.py,sha256=JP0N7OIyTqIR3prUDfMZOR4DV4tlV_xXf39-bQg7xOo,4165 -attr/_cmp.pyi,sha256=oyjJVytrwwkUJOoe332IiYzp6pCVZEKKcKveH-ev604,317 -attr/_compat.py,sha256=i8u27AAK_4SzQnmTf3aliGV27UdYbJxdZ-O0tOHbLU8,8396 -attr/_config.py,sha256=aj1Lh8t2CuVa5nSxgCrLQtg_ZSdO8ZKeNJQd6RvpIp8,892 -attr/_funcs.py,sha256=sm_D12y2IyRW_bCnR7M-O7U5qHaieXr0BzINwJ7_K38,14753 -attr/_make.py,sha256=D05j0_ckcVIRFn2xHch5SPUCwh3t7WpeFj-3Ku9SocQ,102736 -attr/_next_gen.py,sha256=s5jCsVEQ4IhOjAykP4N0ETaWpg0RsgQttMvEZErUrhQ,5752 -attr/_version_info.py,sha256=sxD9yNai0jGbur_-RGEQHbgV2YX5_5G9PhrhBA5pA54,2194 -attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 -attr/converters.py,sha256=uiiWTz8GLJe8I1Ty7UICK1DegVUnqHTXbOSnar7g7Nk,4078 -attr/converters.pyi,sha256=MQo7iEzPNVoFpKqD30sVwgVpdNoIeSCF2nsXvoxLZ-Y,416 -attr/exceptions.py,sha256=BMg7AljkJnvG-irMwL2TBHYlaLBXhSKnzoEWo4e42Zw,1981 -attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 -attr/filters.py,sha256=JGZgvPGkdOfttkoL6XhXS6ZCoaVV5nZ8GCYeZNUN_mE,1124 -attr/filters.pyi,sha256=_Sm80jGySETX_Clzdkon5NHVjQWRl3Y3liQKZX1czXc,215 -attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
-attr/setters.py,sha256=rH_UtQuHgQEC7hfZyMO_SJW0R1Gus7-a83U8igZfqs8,1466 -attr/setters.pyi,sha256=7dM10rqpQVDW0y-iJUnq8rabdO5Wx2Sbo5LwNa0IXl0,573 -attr/validators.py,sha256=jVE9roaSOmTf0dJNSLHNaQNilkrlzc3pNNBKmv0g7pk,15966 -attr/validators.pyi,sha256=adn6rNbIXmRXlg_FKrTmWj0dOX0vKTsGG82Jd3YcJbQ,2268 -attrs-21.4.0.dist-info/AUTHORS.rst,sha256=wsqCNbGz_mklcJrt54APIZHZpoTIJLkXqEhhn4Nd8hc,752 -attrs-21.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -attrs-21.4.0.dist-info/LICENSE,sha256=v2WaKLSSQGAvVrvfSQy-LsUJsVuY-Z17GaUsdA4yeGM,1082 -attrs-21.4.0.dist-info/METADATA,sha256=WwgR4MfxE55PpGGv21UOEOEtXZGCqwekfXYg-JgA5HY,9810 -attrs-21.4.0.dist-info/RECORD,, -attrs-21.4.0.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 -attrs-21.4.0.dist-info/top_level.txt,sha256=AGbmKnOtYpdkLRsDRQVSBIwfL32pAQ6BSo1mt-BxI7M,11 -attrs/__init__.py,sha256=CeyxLGVViAEKKsLOLaif8vF3vs1a28vsrRVLv7eMEgM,1109 -attrs/__init__.pyi,sha256=57aCxUJukK9lZlrUgk9RuWiBiPY5DzDKJAJkhbrStYw,1982 -attrs/converters.py,sha256=fCBEdlYWcmI3sCnpUk2pz22GYtXzqTkp6NeOpdI64PY,70 -attrs/exceptions.py,sha256=SlDli6AY77f6ny-H7oy98OkQjsrw-D_supEuErIVYkE,70 -attrs/filters.py,sha256=dc_dNey29kH6KLU1mT2Dakq7tZ3kBfzEGwzOmDzw1F8,67 -attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -attrs/setters.py,sha256=oKw51C72Hh45wTwYvDHJP9kbicxiMhMR4Y5GvdpKdHQ,67 -attrs/validators.py,sha256=4ag1SyVD2Hm3PYKiNG_NOtR_e7f81Hr6GiNl4YvXo4Q,70 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/top_level.txt deleted file mode 100644 index eca8ba9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/top_level.txt +++ /dev/null @@ -1,2 +0,0 @@ -attr -attrs diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/METADATA new file mode 100644 index 0000000..4a986f0 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/METADATA @@ -0,0 +1,243 @@ +Metadata-Version: 2.1 +Name: attrs +Version: 23.1.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: Bug Tracker, https://github.com/python-attrs/attrs/issues +Project-URL: Source Code, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.7 
+Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Requires-Dist: importlib-metadata; python_version < '3.8' +Provides-Extra: cov +Requires-Dist: attrs[tests]; extra == 'cov' +Requires-Dist: coverage[toml]>=5.3; extra == 'cov' +Provides-Extra: dev +Requires-Dist: attrs[docs,tests]; extra == 'dev' +Requires-Dist: pre-commit; extra == 'dev' +Provides-Extra: docs +Requires-Dist: furo; extra == 'docs' +Requires-Dist: myst-parser; extra == 'docs' +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: sphinx-notfound-page; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier; extra == 'docs' +Requires-Dist: towncrier; extra == 'docs' +Requires-Dist: zope-interface; extra == 'docs' +Provides-Extra: tests +Requires-Dist: attrs[tests-no-zope]; extra == 'tests' +Requires-Dist: zope-interface; extra == 'tests' +Provides-Extra: tests-no-zope +Requires-Dist: cloudpickle; platform_python_implementation == 'CPython' and extra == 'tests-no-zope' +Requires-Dist: hypothesis; extra == 'tests-no-zope' +Requires-Dist: mypy>=1.1.1; platform_python_implementation == 'CPython' and extra == 'tests-no-zope' +Requires-Dist: pympler; extra == 'tests-no-zope' +Requires-Dist: pytest-mypy-plugins; platform_python_implementation == 'CPython' and python_version < '3.11' and extra == 'tests-no-zope' +Requires-Dist: pytest-xdist[psutil]; extra == 'tests-no-zope' +Requires-Dist: pytest>=4.3.0; extra == 'tests-no-zope' +Description-Content-Type: text/markdown + +

+[attrs logo]

+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + +

+[sponsor logos]

+ Please consider joining them to help make attrs’s maintenance more sustainable! +

+ + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +**Hate type annotations**!? +No problem! +Types are entirely **optional** with *attrs*. +Simply assign `attrs.field()` to the attributes instead of annotating them with types. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Please check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for a more in-depth explanation. + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). +In practice it does a lot more and is more flexible. +For instance it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), or allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization). + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes). + + +## Project Information + +- [**Changelog**](https://www.attrs.org/en/stable/changelog.html) +- [**Documentation**](https://www.attrs.org/) +- [**PyPI**](https://pypi.org/project/attrs/) +- [**Source Code**](https://github.com/python-attrs/attrs) +- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md) +- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs) +- **License**: [MIT](https://www.attrs.org/en/latest/license.html) +- **Get Help**: please use the `python-attrs` tag on [StackOverflow](https://stackoverflow.com/questions/tagged/python-attrs) +- **Supported Python Versions**: 3.7 and later + + +### *attrs* for Enterprise + +Available as part of the Tidelift Subscription. + +The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. 
+[Learn more.](https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=referral&utm_campaign=enterprise&utm_term=repo)
+
+## Release Information
+
+### Backwards-incompatible Changes
+
+- Python 3.6 has been dropped and packaging switched to static package data using [Hatch](https://hatch.pypa.io/latest/).
+  [#993](https://github.com/python-attrs/attrs/issues/993)
+
+
+### Deprecations
+
+- The support for *zope-interface* via the `attrs.validators.provides` validator is now deprecated and will be removed in, or after, April 2024.
+
+  The presence of a C-based package in our development dependencies has caused headaches and we're not under the impression it's used a lot.
+
+  Let us know if you're using it and we might publish it as a separate package.
+  [#1120](https://github.com/python-attrs/attrs/issues/1120)
+
+
+### Changes
+
+- `attrs.filters.exclude()` and `attrs.filters.include()` now support the passing of attribute names as strings.
+  [#1068](https://github.com/python-attrs/attrs/issues/1068)
+- `attrs.has()` and `attrs.fields()` now handle generic classes correctly.
+  [#1079](https://github.com/python-attrs/attrs/issues/1079)
+- Fix frozen exception classes when raised within e.g. `contextlib.contextmanager`, which mutates their `__traceback__` attributes.
+  [#1081](https://github.com/python-attrs/attrs/issues/1081)
+- `@frozen` now works with type checkers that implement [PEP-681](https://peps.python.org/pep-0681/) (ex. [pyright](https://github.com/microsoft/pyright/)).
+  [#1084](https://github.com/python-attrs/attrs/issues/1084)
+- Restored ability to unpickle instances pickled before 22.2.0.
+  [#1085](https://github.com/python-attrs/attrs/issues/1085)
+- `attrs.asdict()`'s and `attrs.astuple()`'s type stubs now accept the `attrs.AttrsInstance` protocol.
+  [#1090](https://github.com/python-attrs/attrs/issues/1090)
+- Fix slots class cellvar updating closure in CPython 3.8+ even when `__code__` introspection is unavailable.
+  [#1092](https://github.com/python-attrs/attrs/issues/1092)
+- `attrs.resolve_types()` can now pass `include_extras` to `typing.get_type_hints()` on Python 3.9+, and does so by default.
+  [#1099](https://github.com/python-attrs/attrs/issues/1099)
+- Added instructions for pull request workflow to `CONTRIBUTING.md`.
+  [#1105](https://github.com/python-attrs/attrs/issues/1105)
+- Added *type* parameter to `attrs.field()` function for use with `attrs.make_class()`.
+
+  Please note that type checkers ignore type metadata passed into `make_class()`, but it can be useful if you're wrapping _attrs_.
+  [#1107](https://github.com/python-attrs/attrs/issues/1107)
+- It is now possible for `attrs.evolve()` (and `attr.evolve()`) to change fields named `inst` if the instance is passed as a positional argument.
+
+  Passing the instance using the `inst` keyword argument is now deprecated and will be removed in, or after, April 2024.
+  [#1117](https://github.com/python-attrs/attrs/issues/1117)
+- `attrs.validators.optional()` now also accepts a tuple of validators (in addition to lists of validators).
+  [#1122](https://github.com/python-attrs/attrs/issues/1122)
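Two of the changes above are easy to see in action. The following is a minimal, illustrative sketch against the attrs 23.1.0 API vendored by this patch; the `Server` class, its field names, and the values are invented for the example and are not part of this add-on:

```python
# Illustrative only: exercises two attrs 23.1.0 changelog items (#1122, #1068).
from attrs import asdict, define, field, filters, validators

@define
class Server:
    host: str
    # #1122: validators.optional() now accepts a tuple of validators,
    # in addition to a single validator or a list of validators.
    port: int = field(
        default=443,
        validator=validators.optional(
            (validators.instance_of(int), validators.in_(range(1, 65536)))
        ),
    )
    token: str = field(default="")

server = Server("dnac.example.com")

# #1068: filters.exclude() / filters.include() accept attribute names as
# plain strings, so a secret can be dropped from serialization by name.
print(asdict(server, filter=filters.exclude("token")))
# -> {'host': 'dnac.example.com', 'port': 443}
```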
+
+
+
+---
+
+[Full changelog](https://www.attrs.org/en/stable/changelog.html)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/RECORD
new file mode 100644
index 0000000..575fa9e
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/RECORD
@@ -0,0 +1,36 @@
+attr/__init__.py,sha256=dSRUBxRVTh-dXMrMR_oQ3ZISu2QSfhSZlik03Mjbu30,3241
+attr/__init__.pyi,sha256=rIK-2IakIoehVtqXK5l5rs9_fJNCbnYtKTS3cOAVJD8,17609
+attr/_cmp.py,sha256=diMUQV-BIg7IjIb6-o1hswtnjrR4qdAUz_tE8gxS96w,4098
+attr/_cmp.pyi,sha256=sGQmOM0w3_K4-X8cTXR7g0Hqr290E8PTObA9JQxWQqc,399
+attr/_compat.py,sha256=d3cpIu60IbKrLywPni17RUEQY7MvkqqKifyzJ5H3zRU,5803
+attr/_config.py,sha256=5W8lgRePuIOWu1ZuqF1899e2CmXGc95-ipwTpF1cEU4,826
+attr/_funcs.py,sha256=YMtzHRSOnFvOVJ7at3E0K95A2lW26HDjby96TMTDbc0,16730
+attr/_make.py,sha256=JIyKV-HRh3IcHi-EvOj2dw6tRoqATlx2kBHFrrxZpk0,96979
+attr/_next_gen.py,sha256=8lB_S5SFgX2KsflksK8Zygk6XDXToQYtIlmgd37I9aY,6271
+attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469
+attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121
+attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209
+attr/converters.py,sha256=xfGVSPRgWGcym6N5FZM9fyfvCQePqFyApWeC5BXKvoM,3602
+attr/converters.pyi,sha256=jKlpHBEt6HVKJvgrMFJRrHq8p61GXg4-Nd5RZWKJX7M,406
+attr/exceptions.py,sha256=0ZTyH_mHmI9utwTTbBWrdS_ck5jps9R2M_fYJPXxH_U,1890
+attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539
+attr/filters.py,sha256=9pYvXqdg6mtLvKIIb56oALRMoHFnQTcGCO4EXTc1qyM,1470
+attr/filters.pyi,sha256=0mRCjLKxdcvAo0vD-Cr81HfRXXCp9j_cAXjOoAHtPGM,225
+attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+attr/setters.py,sha256=pbCZQ-pE6ZxjDqZfWWUhUFefXtpekIU4qS_YDMLPQ50,1400
+attr/setters.pyi,sha256=pyY8TVNBu8TWhOldv_RxHzmGvdgFQH981db70r0fn5I,567
+attr/validators.py,sha256=C2MQgX7ubL_cs5YzibWa8m0YxdMq5_3Ch3dVIzsLO-Y,20702
+attr/validators.pyi,sha256=167Dl9nt7NUhE9wht1I-buo039qyUT1nEUT_nKjSWr4,2580
+attrs-23.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+attrs-23.1.0.dist-info/METADATA,sha256=yglwUXko75Q-IJ6LmPVQ4Y99KJS3CPK0NW8ovXFYsDg,11348
+attrs-23.1.0.dist-info/RECORD,,
+attrs-23.1.0.dist-info/WHEEL,sha256=EI2JsGydwUL5GP9t6kzZv7G3HDPi7FuZDDf9In6amRM,87
+attrs-23.1.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109
+attrs/__init__.py,sha256=9_5waVbFs7rLqtXZ73tNDrxhezyZ8VZeX4BbvQ3EeJw,1039
+attrs/__init__.pyi,sha256=s_ajQ_U14DOsOz0JbmAKDOi46B3v2PcdO0UAV1MY6Ek,2168
+attrs/converters.py,sha256=fCBEdlYWcmI3sCnpUk2pz22GYtXzqTkp6NeOpdI64PY,70
+attrs/exceptions.py,sha256=SlDli6AY77f6ny-H7oy98OkQjsrw-D_supEuErIVYkE,70
+attrs/filters.py,sha256=dc_dNey29kH6KLU1mT2Dakq7tZ3kBfzEGwzOmDzw1F8,67
+attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+attrs/setters.py,sha256=oKw51C72Hh45wTwYvDHJP9kbicxiMhMR4Y5GvdpKdHQ,67
+attrs/validators.py,sha256=4ag1SyVD2Hm3PYKiNG_NOtR_e7f81Hr6GiNl4YvXo4Q,70
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/WHEEL
similarity index 67%
rename from
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/WHEEL index 862df1f..58d0071 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 -Generator: poetry 1.0.3 +Generator: hatchling 1.14.0 Root-Is-Purelib: true Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/licenses/LICENSE similarity index 94% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/licenses/LICENSE index 7ae3df9..2bd6453 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/LICENSE +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-23.1.0.dist-info/licenses/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2015 Hynek Schlawack +Copyright (c) 2015 Hynek Schlawack and the attrs contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.py old mode 100755 new mode 100644 index a704b8b..0c24815 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.py @@ -3,17 +3,9 @@ from attr import ( NOTHING, Attribute, + AttrsInstance, Factory, - __author__, - __copyright__, - __description__, - __doc__, - __email__, - __license__, - __title__, - __url__, - __version__, - __version_info__, + _make_getattr, assoc, cmp_using, define, @@ -48,6 +40,7 @@ "assoc", "astuple", "Attribute", + "AttrsInstance", "cmp_using", "converters", "define", @@ -68,3 +61,5 @@ "validate", "validators", ] + +__getattr__ = _make_getattr(__name__) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.pyi index 7426fa5..9372cfe 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.pyi +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/__init__.pyi @@ -23,13 +23,17 @@ from attr import __version_info__ as __version_info__ from attr import _FilterType from attr import assoc as assoc from attr import Attribute as Attribute +from attr import AttrsInstance as AttrsInstance +from attr import cmp_using as cmp_using +from attr import converters as converters from attr import define as define from attr import evolve as evolve -from attr import Factory as Factory from attr import exceptions as exceptions +from attr import Factory as Factory from attr import field as field from attr import fields as fields from attr import fields_dict as fields_dict +from attr import filters as filters from attr import frozen as frozen from attr import has as has from attr import 
make_class as make_class @@ -42,7 +46,7 @@ from attr import validators as validators # TODO: see definition of attr.asdict/astuple def asdict( - inst: Any, + inst: AttrsInstance, recurse: bool = ..., filter: Optional[_FilterType[Any]] = ..., dict_factory: Type[Mapping[Any, Any]] = ..., @@ -55,7 +59,7 @@ def asdict( # TODO: add support for returning NamedTuple from the mypy plugin def astuple( - inst: Any, + inst: AttrsInstance, recurse: bool = ..., filter: Optional[_FilterType[Any]] = ..., tuple_factory: Type[Sequence[Any]] = ..., diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/converters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/converters.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/filters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/filters.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/setters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/setters.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/validators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs/validators.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/futurize b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/futurize deleted file mode 100644 index 392e2fa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/futurize +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys -from libfuturize.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/json b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/json old mode 100644 new mode 100755 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/jsonpath_ng b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/jsonpath_ng old mode 100644 new mode 100755 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/jsonschema b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/jsonschema old mode 100644 new mode 100755 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/mako-render b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/mako-render old mode 100644 new mode 100755 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/normalizer b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/normalizer old mode 100644 new mode 100755 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/pasteurize b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/pasteurize deleted file mode 100644 index 6106a53..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/bin/pasteurize +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -import 
re -import sys -from libpasteurize.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/RECORD deleted file mode 100644 index 0c541d4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/RECORD +++ /dev/null @@ -1,10 +0,0 @@ -certifi-2021.10.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -certifi-2021.10.8.dist-info/LICENSE,sha256=vp2C82ES-Hp_HXTs1Ih-FGe7roh4qEAEoAEXseR1o-I,1049 -certifi-2021.10.8.dist-info/METADATA,sha256=iB_zbT1uX_8_NC7iGv0YEB-9b3idhQwHrFTSq8R1kD8,2994 -certifi-2021.10.8.dist-info/RECORD,, -certifi-2021.10.8.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110 -certifi-2021.10.8.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 -certifi/__init__.py,sha256=xWdRgntT3j1V95zkRipGOg_A1UfEju2FcpujhysZLRI,62 -certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243 -certifi/cacert.pem,sha256=-og4Keu4zSpgL5shwfhd4kz0eUnVILzrGCi0zRy2kGw,265969 -certifi/core.py,sha256=V0uyxKOYdz6ulDSusclrLmjbPgOXsD0BnEf0SQ7OnoE,2303 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/WHEEL deleted file mode 100644 index 6d38aa0..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.35.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/LICENSE similarity index 91% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/LICENSE index c2fda9a..0a64774 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/LICENSE +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/LICENSE @@ -6,7 +6,7 @@ Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011# This is a bundle of X.509 certificates of public Certificate Authorities (CA). These were automatically extracted from Mozilla's root certificates file (certdata.txt). 
This file can be found in the mozilla source tree: -http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1# +https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt It contains the certificates in PEM format and therefore can be directly used with curl / libcurl / php_curl, or with an Apache+mod_ssl webserver for SSL client authentication. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/METADATA similarity index 85% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/METADATA index 7a6860d..aeb1991 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/METADATA @@ -1,12 +1,11 @@ Metadata-Version: 2.1 Name: certifi -Version: 2021.10.8 +Version: 2022.12.7 Summary: Python package for providing Mozilla's CA Bundle. -Home-page: https://certifiio.readthedocs.io/en/latest/ +Home-page: https://github.com/certifi/python-certifi Author: Kenneth Reitz Author-email: me@kennethreitz.com License: MPL-2.0 -Project-URL: Documentation, https://certifiio.readthedocs.io/en/latest/ Project-URL: Source, https://github.com/certifi/python-certifi Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable @@ -15,18 +14,20 @@ Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) Classifier: Natural Language :: English Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Requires-Python: >=3.6 +License-File: LICENSE Certifi: Python SSL Certificates ================================ -`Certifi`_ provides Mozilla's carefully curated collection of Root Certificates for +Certifi provides Mozilla's carefully curated collection of Root Certificates for validating the trustworthiness of SSL certificates while verifying the identity of TLS hosts. It has been extracted from the `Requests`_ project. @@ -69,7 +70,6 @@ In previous versions, ``certifi`` provided the ``certifi.old_where()`` function to intentionally re-add the 1024-bit roots back into your bundle. This was not recommended in production and therefore was removed at the end of 2018. -.. _`Certifi`: https://certifiio.readthedocs.io/en/latest/ .. 
_`Requests`: https://requests.readthedocs.io/en/master/ Addition/Removal of Certificates diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/RECORD new file mode 100644 index 0000000..1f00d6a --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/RECORD @@ -0,0 +1,11 @@ +certifi-2022.12.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +certifi-2022.12.7.dist-info/LICENSE,sha256=oC9sY4-fuE0G93ZMOrCF2K9-2luTwWbaVDEkeQd8b7A,1052 +certifi-2022.12.7.dist-info/METADATA,sha256=chFpcxKhCPEQ3d8-Vz36zr2Micf1eQhKkFFk7_JvJNo,2911 +certifi-2022.12.7.dist-info/RECORD,, +certifi-2022.12.7.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +certifi-2022.12.7.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 +certifi/__init__.py,sha256=bK_nm9bLJzNvWZc2oZdiTwg2KWD4HSPBWGaM0zUDvMw,94 +certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243 +certifi/cacert.pem,sha256=LBHDzgj_xA05AxnHK8ENT5COnGNElNZe0svFUHMf1SQ,275233 +certifi/core.py,sha256=lhewz0zFb2b4ULsQurElmloYwQoecjWzPqY67P8T7iM,4219 +certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/WHEEL similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/WHEEL diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2021.10.8.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi-2022.12.7.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__init__.py old mode 100755 new mode 100644 index 8db1a0e..a3546f1 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__init__.py @@ -1,3 +1,4 @@ from .core import contents, where -__version__ = "2021.10.08" +__all__ = ["contents", "where"] +__version__ = "2022.12.07" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__main__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/__main__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/cacert.pem b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/cacert.pem index 6d0ccc0..df9e4e3 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/cacert.pem +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/cacert.pem @@ -28,36 +28,6 @@ DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== 
-----END CERTIFICATE----- -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Label: "GlobalSign Root CA - R2" -# Serial: 4835703278459682885658125 -# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 -# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe -# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e ------BEGIN CERTIFICATE----- -MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G -A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp -Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 -MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG -A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL -v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 -eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq -tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd -C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa -zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB -mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH -V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n -bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG -3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs -J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO -291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS -ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd -AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 -TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== ------END CERTIFICATE----- - # Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Label: "Entrust.net Premium 2048 Secure Server CA" @@ -491,34 +461,6 @@ vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep +OkuE6N36B9K -----END CERTIFICATE----- -# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. -# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. 
-# Label: "DST Root CA X3" -# Serial: 91299735575339953335919266965803778155 -# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 -# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 -# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ -MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT -DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow -PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD -Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O -rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq -OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b -xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw -7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD -aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG -SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 -ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr -AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz -R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 -JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo -Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ ------END CERTIFICATE----- - # Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG # Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG # Label: "SwissSign Gold CA - G2" @@ -694,37 +636,6 @@ BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB ZQ== -----END CERTIFICATE----- -# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. -# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. 
-# Label: "Network Solutions Certificate Authority" -# Serial: 116697915152937497490437556386812487904 -# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e -# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce -# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c ------BEGIN CERTIFICATE----- -MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi -MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu -MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp -dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV -UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO -ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz -c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP -OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl -mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF -BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 -qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw -gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu -bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp -dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 -6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ -h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH -/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv -wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN -pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey ------END CERTIFICATE----- - # Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited # Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited # Label: "COMODO ECC Certification Authority" @@ -779,36 +690,6 @@ t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== -----END CERTIFICATE----- -# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc -# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc -# Label: "Cybertrust Global Root" -# Serial: 4835703278459682877484360 -# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 -# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 -# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 ------BEGIN CERTIFICATE----- -MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG -A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh -bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE -ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS -b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 -7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS -J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y -HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP -t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz -FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY -XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ -MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw -hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js -MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA 
-A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj -Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx -XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o -omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc -A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW -WL1WMRJOEcgh4LMRkWXbtKaIOM5V ------END CERTIFICATE----- - # Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority # Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority # Label: "ePKI Root Certification Authority" @@ -1411,78 +1292,6 @@ t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 -----END CERTIFICATE----- -# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes -# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes -# Label: "EC-ACC" -# Serial: -23701579247955709139626555126524820479 -# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09 -# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8 -# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99 ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB -8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy -dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1 -YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3 -dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh -IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD -LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG -EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g -KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD -ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu -bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg -ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R -85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm -4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV -HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd -QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t -lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB -o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4 -opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo -dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW -ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN -AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y -/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k -SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy -Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS -Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl -nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI= ------END CERTIFICATE----- - -# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. 
Authority -# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority -# Label: "Hellenic Academic and Research Institutions RootCA 2011" -# Serial: 0 -# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 -# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d -# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 ------BEGIN CERTIFICATE----- -MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix -RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 -dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p -YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw -NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK -EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl -cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl -c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz -dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ -fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns -bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD -75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP -FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV -HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp -5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu -b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA -A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p -6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 -TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 -dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys -Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI -l7WdmplNsDz4SgCbZN2fOUvRJ9e4 ------END CERTIFICATE----- - # Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 # Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 # Label: "Actalis Authentication Root CA" @@ -2342,27 +2151,6 @@ zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= -----END CERTIFICATE----- -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 -# Label: "GlobalSign ECC Root CA - R4" -# Serial: 14367148294922964480859022125800977897474 -# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e -# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb -# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c ------BEGIN CERTIFICATE----- -MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk -MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH -bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX -DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD -QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ -FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw -DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F -uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX 
-kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs -ewv4n4Q= ------END CERTIFICATE----- - # Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 # Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 # Label: "GlobalSign ECC Root CA - R5" @@ -2385,46 +2173,6 @@ KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg xwy8p2Fp8fc74SrL+SvzZpA3 -----END CERTIFICATE----- -# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden -# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden -# Label: "Staat der Nederlanden EV Root CA" -# Serial: 10000013 -# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba -# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb -# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a ------BEGIN CERTIFICATE----- -MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO -TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh -dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y -MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg -TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS -b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS -M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC -UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d -Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p -rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l -pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb -j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC -KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS -/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X -cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH -1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP -px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB -/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 -MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI -eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u -2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS -v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC -wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy -CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e -vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 -Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa -Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL -eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 -FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc -7uzXLg== ------END CERTIFICATE----- - # Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust # Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust # Label: "IdenTrust Commercial Root CA 1" @@ -3032,116 +2780,6 @@ T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== -----END CERTIFICATE----- -# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority -# Label: "TrustCor RootCert CA-1" -# Serial: 15752444095811006489 -# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 -# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a -# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c ------BEGIN CERTIFICATE----- -MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD -VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk -MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U -cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y -IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB -pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h -IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG -A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU -cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid -RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V -seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme -9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV -EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW -hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ -DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw -DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD -ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I -/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf -ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ -yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts -L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN -zl/HHk484IkzlQsPpTLWPFp5LBk= ------END CERTIFICATE----- - -# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority -# Label: "TrustCor RootCert CA-2" -# Serial: 2711694510199101698 -# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 -# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 -# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 ------BEGIN CERTIFICATE----- -MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV -BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw -IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy -dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig -Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk -MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg -Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD -VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy -dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ -QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq -1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp -2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK -DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape -az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF -3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 -oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM -g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 -mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh -8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd -BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U -nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw -DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX -dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ -MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL -/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX -CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa -ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW -2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 -N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 -Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB -As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp -5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu -1uwJ ------END CERTIFICATE----- - -# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority -# Label: "TrustCor ECA-1" -# Serial: 9548242946988625984 -# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c -# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd -# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD -VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk -MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U -cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y -IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV -BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw -IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy -dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig -RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb -3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA -BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 -3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou -owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ -wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF -ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf -BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ -MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv -civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 -AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F -hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 -soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI -WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi -tJ/X5g== ------END CERTIFICATE----- - # Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation # Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation # Label: "SSL.com Root Certification Authority RSA" @@ -3337,126 +2975,6 @@ rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 -----END CERTIFICATE----- -# Issuer: CN=GTS Root R1 O=Google Trust Services LLC -# Subject: CN=GTS Root R1 O=Google Trust Services LLC -# Label: "GTS Root R1" -# Serial: 146587175971765017618439757810265552097 -# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85 -# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8 -# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72 ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH -MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM -QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy -MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl -cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB -AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM -f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX -mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7 -zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P -fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc -vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4 -Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp 
-zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO -Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW -k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+ -DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF -lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV -HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW -Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 -d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z -XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR -gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3 -d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv -J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg -DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM -+SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy -F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9 -SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws -E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R2 O=Google Trust Services LLC -# Subject: CN=GTS Root R2 O=Google Trust Services LLC -# Label: "GTS Root R2" -# Serial: 146587176055767053814479386953112547951 -# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b -# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d -# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60 ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH -MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM -QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy -MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl -cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB -AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv -CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg -GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu -XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd -re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu -PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1 -mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K -8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj -x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR -nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0 -kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok -twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV -HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp -8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT -vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT -z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA -pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb -pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB -R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R -RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk -0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC -5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF -izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn -yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R3 
O=Google Trust Services LLC -# Subject: CN=GTS Root R3 O=Google Trust Services LLC -# Label: "GTS Root R3" -# Serial: 146587176140553309517047991083707763997 -# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25 -# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5 -# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5 ------BEGIN CERTIFICATE----- -MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw -CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU -MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw -MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp -Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA -IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout -736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A -DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud -DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk -fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA -njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R4 O=Google Trust Services LLC -# Subject: CN=GTS Root R4 O=Google Trust Services LLC -# Label: "GTS Root R4" -# Serial: 146587176229350439916519468929765261721 -# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26 -# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb -# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd ------BEGIN CERTIFICATE----- -MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw -CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU -MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw -MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp -Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA -IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu -hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l -xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud -DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0 -CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx -sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w== ------END CERTIFICATE----- - # Issuer: CN=UCA Global G2 Root O=UniTrust # Subject: CN=UCA Global G2 Root O=UniTrust # Label: "UCA Global G2 Root" @@ -4360,3 +3878,650 @@ AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps -----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 1977337328857672817 +# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3 +# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe +# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1 
+MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc +tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd +IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC +AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw +ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m +iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF +Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ +hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P +Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE +EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV +1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t +CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR +5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw +f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9 +ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK +GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. +# Label: "vTrus ECC Root CA" +# Serial: 630369271402956006249506845124680065938238527194 +# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85 +# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1 +# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3 +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw +RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY +BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz +MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u +LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0 +v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd +e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw +V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA +AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG +GJTO +-----END CERTIFICATE----- + +# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd. 
+# Label: "vTrus Root CA" +# Serial: 387574501246983434957692974888460947164905180485 +# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc +# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7 +# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87 +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL +BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x +FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx +MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s +THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc +IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU +AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+ +GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9 +8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH +flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt +J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim +0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN +pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ +UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW +OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB +AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet +8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j +bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM +Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv +TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS +S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr +I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9 +b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB +UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P +Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven +sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s= +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X2 O=Internet Security Research Group +# Subject: CN=ISRG Root X2 O=Internet Security Research Group +# Label: "ISRG Root X2" +# Serial: 87493402998870891108772069816698636114 +# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5 +# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af +# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70 +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END 
CERTIFICATE----- + +# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Label: "HiPKI Root CA - G1" +# Serial: 60966262342023497858655262305426234976 +# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3 +# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60 +# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa +Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3 +YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw +qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv +Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6 +lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz +Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ +KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK +FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj +HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr +y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ +/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM +a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6 +fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG +SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc +SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza +ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc +XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg +iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho +L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF +Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr +kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+ +vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU +YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 159662223612894884239637590694 +# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc +# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28 +# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ 
++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 159662320309726417404178440727 +# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40 +# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a +# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ +Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 159662449406622349769042896298 +# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc +# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94 +# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu 
+MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 159662495401136852707857743206 +# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73 +# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46 +# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 159662532700760215368942768210 +# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8 +# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47 +# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR 
+HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj +# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj +# Label: "Telia Root CA v2" +# Serial: 7288924052977061235122729490515358 +# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48 +# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd +# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx +CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE +AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1 +NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ +MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq +AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9 +vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9 +lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD +n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT +7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o +6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC +TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6 +WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R +DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI +pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj +YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy +rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi +0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM +A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS +SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K +TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF +6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er +3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt +Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT +VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW +ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA +rBPuUBQemMc= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 1 2020" +# Serial: 165870826978392376648679885835942448534 +# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed +# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67 +# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44 +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5 +NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB 
+BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS +zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0 +QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/ +VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW +wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV +dWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 1 2020" +# Serial: 126288379621884218666039612629459926992 +# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e +# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07 +# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5 +NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC +/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD +wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3 +OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA +y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb +gfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. 
+# Label: "DigiCert TLS ECC P384 Root G5" +# Serial: 13129116028163249804115411775095713523 +# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed +# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee +# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05 +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp +Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2 +MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ +bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS +7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp +0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS +B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49 +BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ +LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4 +DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS RSA4096 Root G5" +# Serial: 11930366277458970227240571539258396554 +# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1 +# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35 +# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75 +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT +HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN +NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs +IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+ +ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0 +2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp +wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM +pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD +nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po +sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx +Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd +Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX +KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe +XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL +tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv +TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H +PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF +O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ +REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik +AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv +/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+ +p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw +MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF +qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK 
+ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root R1 O=Certainly +# Subject: CN=Certainly Root R1 O=Certainly +# Label: "Certainly Root R1" +# Serial: 188833316161142517227353805653483829216 +# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12 +# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af +# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw +PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy +dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9 +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0 +YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2 +1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT +vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed +aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0 +1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5 +r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5 +cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ +wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ +6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA +2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH +Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR +eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u +d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr +PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi +1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd +rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di +taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7 +lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj +yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn +Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy +yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n +wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6 +OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root E1 O=Certainly +# Subject: CN=Certainly Root E1 O=Certainly +# Label: "Certainly Root E1" +# Serial: 8168531406727139161245376702891150584 +# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9 +# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b +# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2 +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw +CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu +bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ +BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s +eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK ++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2 +QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4 +hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm 
+ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG +BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA RSA v3" +# Serial: 75951268308633135324246244059508261641472512052 +# MD5 Fingerprint: 22:be:10:f6:c2:f8:03:88:73:5f:33:29:47:28:47:a4 +# SHA1 Fingerprint: e9:a8:5d:22:14:52:1c:5b:aa:0a:b4:be:24:6a:23:8a:c9:ba:e2:a9 +# SHA256 Fingerprint: ef:66:b0:b1:0a:3c:db:9f:2e:36:48:c7:6b:d2:af:18:ea:d2:bf:e6:f1:17:65:5e:28:c4:06:0d:a1:a3:f4:c2 +-----BEGIN CERTIFICATE----- +MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQEL +BQAwgYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUt +VHVncmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYw +JAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIFJTQSB2MzAeFw0yMDAzMTgw +OTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMG +QW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1 +Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBD +QSBSU0EgdjMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J7 +7gnJY9LTQ91ew6aEOErxjYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscx +uj7X/iWpKo429NEvx7epXTPcMHD4QGxLsqYxYdE0PD0xesevxKenhOGXpOhL9hd8 +7jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF/YP9f4RtNGx/ardLAQO/ +rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8qQedmCeFL +l+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bG +wzrwbMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4 +znKS4iicvObpCdg604nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBO +M/J+JjKsBY04pOZ2PJ8QaQ5tndLBeSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK +5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiMbIedBi3x7+PmBvrFZhNb/FAH +nnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbgh3cXTJ2w2Amo +DVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSy +tK7mLfcm1ap1LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEL +BQADggIBAImocn+M684uGMQQgC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ +6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN438o2Fi+CiJ+8EUdPdk3ILY7r3y18 +Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/qln0F7psTpURs+APQ +3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3sSdPk +vmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn9 +9t2HVhjYsCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQ +mhty3QUBjYZgv6Rn7rWlDdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YA +VSgU7NbHEqIbZULpkejLPoeJVF3Zr52XnGnnCv8PWniLYypMfUeUP95L6VPQMPHF +9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFHIK+WEj5jlB0E5y67hscM +moi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiXYY60MGo8 +bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. 
OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA ECC v3" +# Serial: 218504919822255052842371958738296604628416471745 +# MD5 Fingerprint: 46:bc:81:bb:f1:b5:1e:f7:4b:96:bc:14:e2:e7:27:64 +# SHA1 Fingerprint: 8a:2f:af:57:53:b1:b0:e6:a1:04:ec:5b:6a:69:71:6d:f6:1c:e2:84 +# SHA256 Fingerprint: 87:3f:46:85:fa:7f:56:36:25:25:2e:6d:36:bc:d7:f1:6f:c2:49:51:f2:64:e4:7e:1b:95:4f:49:08:cd:ca:13 +-----BEGIN CERTIFICATE----- +MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMw +gYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVn +cmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYD +VQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIEVDQyB2MzAeFw0yMDAzMTgwOTQ2 +NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMGQW5r +YXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1Z3Jh +IFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBF +Q0MgdjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQ +KczLWYHMjLiSF4mDKpL2w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YK +fWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMB +Af8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQzPUwHQYDVR0OBBYEFP+C +MXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNp +ADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/6 +7W4WAie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFx +vmjkI6TZraE3 +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication RootCA3" +# Serial: 16247922307909811815 +# MD5 Fingerprint: 1c:9a:16:ff:9e:5c:e0:4d:8a:14:01:f4:35:5d:29:26 +# SHA1 Fingerprint: c3:03:c8:22:74:92:e5:61:a2:9c:5f:79:91:2b:1e:44:13:91:30:3a +# SHA256 Fingerprint: 24:a5:5c:2a:b0:51:44:2d:06:17:76:65:41:23:9a:4a:d0:32:d7:c5:51:75:aa:34:ff:de:2f:bc:4f:5c:52:94 +-----BEGIN CERTIFICATE----- +MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNV +BAYTAkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScw +JQYDVQQDEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2 +MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UEAxMeU2VjdXJpdHkg +Q29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4r +CmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzA +lrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG +TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF7 +9+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK7 +8vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4 +g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3we +GVPKp7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst ++3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M +0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQ +T9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEAAaNCMEAw +HQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS +YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PA +FNr0Y/Dq9HHuTofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd +9XbXv8S2gVj/yP9kaWJ5rW4OH3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQI +UYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+ +OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZXSEIx2C/pHF7uNke 
+gr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml+LLf +iAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUV +nuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD +2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI// +1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8Laad +TdJ0MN1kURXbg4NR16/9M51NZg== +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication ECC RootCA1" +# Serial: 15446673492073852651 +# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86 +# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41 +# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11 +-----BEGIN CERTIFICATE----- +MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT +AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD +VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx +NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT +HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5 +IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl +dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK +ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu +9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O +be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k= +-----END CERTIFICATE----- diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/core.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/core.py old mode 100755 new mode 100644 index 5d2b8cd..de02898 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/core.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/core.py @@ -1,20 +1,20 @@ -# -*- coding: utf-8 -*- - """ certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem or its contents. """ -import os +import sys -try: - from importlib.resources import path as get_path, read_text + +if sys.version_info >= (3, 11): + + from importlib.resources import as_file, files _CACERT_CTX = None _CACERT_PATH = None - def where(): + def where() -> str: # This is slightly terrible, but we want to delay extracting the file # in cases where we're inside of a zipimport situation until someone # actually calls where(), but we don't want to re-extract the file @@ -33,28 +33,76 @@ def where(): # We also have to hold onto the actual context manager, because # it will do the cleanup whenever it gets garbage collected, so # we will also store that at the global level as well. 
+ _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem")) + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + + return _CACERT_PATH + + def contents() -> str: + return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii") + +elif sys.version_info >= (3, 7): + + from importlib.resources import path as get_path, read_text + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the + # file in cases where we're inside of a zipimport situation until + # someone actually calls where(), but we don't want to re-extract + # the file on every call of where(), so we'll do it once then store + # it in a global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you + # to manage the cleanup of this file, so it doesn't actually + # return a path, it returns a context manager that will give + # you the path when you enter it and will do any cleanup when + # you leave it. In the common case of not needing a temporary + # file, it will just return the file system location and the + # __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. _CACERT_CTX = get_path("certifi", "cacert.pem") _CACERT_PATH = str(_CACERT_CTX.__enter__()) return _CACERT_PATH + def contents() -> str: + return read_text("certifi", "cacert.pem", encoding="ascii") + +else: + import os + import types + from typing import Union + + Package = Union[types.ModuleType, str] + Resource = Union[str, "os.PathLike"] -except ImportError: # This fallback will work for Python versions prior to 3.7 that lack the # importlib.resources module but relies on the existing `where` function # so won't address issues with environments like PyOxidizer that don't set # __file__ on modules. - def read_text(_module, _path, encoding="ascii"): - with open(where(), "r", encoding=encoding) as data: + def read_text( + package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict' + ) -> str: + with open(where(), encoding=encoding) as data: return data.read() # If we don't have importlib.resources, then we will just do the old logic # of assuming we're on the filesystem and munge the path directly. 
- def where(): + def where() -> str: f = os.path.dirname(__file__) return os.path.join(f, "cacert.pem") - -def contents(): - return read_text("certifi", "cacert.pem", encoding="ascii") + def contents() -> str: + return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/py.typed old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/certifi/py.typed diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/LICENSE similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/LICENSE diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/METADATA similarity index 99% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/METADATA index 96c424d..1b04ed4 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: charset-normalizer -Version: 2.0.11 +Version: 2.0.12 Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet. 
Home-page: https://github.com/ousret/charset_normalizer Author: Ahmed TAHRI @Ousret diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/RECORD similarity index 64% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/RECORD rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/RECORD index c164ab2..d5a8e21 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/RECORD @@ -1,11 +1,11 @@ ../../bin/normalizer,sha256=_6VNRnw7MyVCL-CINQ044FaHcpLHwQVgd-_D0cWq4KI,244 -charset_normalizer-2.0.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -charset_normalizer-2.0.11.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070 -charset_normalizer-2.0.11.dist-info/METADATA,sha256=E6L1l_DgbZV6HaN4cwASdSKTEL7aDI2tDd-BV6WOuJ8,11713 -charset_normalizer-2.0.11.dist-info/RECORD,, -charset_normalizer-2.0.11.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 -charset_normalizer-2.0.11.dist-info/entry_points.txt,sha256=5AJq_EPtGGUwJPgQLnBZfbVr-FYCIwT0xP7dIEZO3NI,77 -charset_normalizer-2.0.11.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19 +charset_normalizer-2.0.12.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +charset_normalizer-2.0.12.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070 +charset_normalizer-2.0.12.dist-info/METADATA,sha256=eX-U3s7nb6wcvXZFyM1mdBf1yz4I0msVBgNvLEscAbo,11713 +charset_normalizer-2.0.12.dist-info/RECORD,, +charset_normalizer-2.0.12.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +charset_normalizer-2.0.12.dist-info/entry_points.txt,sha256=5AJq_EPtGGUwJPgQLnBZfbVr-FYCIwT0xP7dIEZO3NI,77 +charset_normalizer-2.0.12.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19 charset_normalizer/__init__.py,sha256=x2A2OW29MBcqdxsvy6t1wzkUlH3ma0guxL6ZCfS8J94,1790 charset_normalizer/api.py,sha256=r__Wz85F5pYOkRwEY5imXY_pCZ2Nil1DkdaAJY7T5o0,20303 charset_normalizer/assets/__init__.py,sha256=FPnfk8limZRb8ZIUQcTvPEcbuM1eqOdWGw0vbWGycDs,25485 @@ -14,8 +14,8 @@ charset_normalizer/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3 charset_normalizer/cli/normalizer.py,sha256=LkeFIRc1l28rOgXpEby695x0bcKQv4D8z9FmA3Z2c3A,9364 charset_normalizer/constant.py,sha256=51u_RS10I1vYVpBao__xHqf--HHNrR6me1A1se5r5Y0,19449 charset_normalizer/legacy.py,sha256=XKeZOts_HdYQU_Jb3C9ZfOjY2CiUL132k9_nXer8gig,3384 -charset_normalizer/md.py,sha256=5RqkWZ3tjhN1njPYHMegBRKACswSSaKDaoG8LDnAHaE,18176 +charset_normalizer/md.py,sha256=WEwnu2MyIiMeEaorRduqcTxGjIBclWIG3i-9_UL6LLs,18191 charset_normalizer/models.py,sha256=XrGpVxfonhcilIWC1WeiP3-ZORGEe_RG3sgrfPLl9qM,13303 charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 charset_normalizer/utils.py,sha256=AWSL0z1B42IwdLfjX4ZMASA9cTUsTp0PweCdW98SI-4,9308 -charset_normalizer/version.py,sha256=GitCC3fnWcIPkkmOsF6ntg6q4HiaNBUMZdT-KwaFb6g,80 +charset_normalizer/version.py,sha256=uxO2cT0YIavQv4dQlNGmHPIOOwOa-exspxXi3IR7dck,80 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/WHEEL similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/Mako-1.1.0.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/WHEEL diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/entry_points.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/entry_points.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/entry_points.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/entry_points.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.12.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/api.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/api.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/assets/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/assets/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cd.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cd.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cli/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cli/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cli/normalizer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/cli/normalizer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/constant.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/constant.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/legacy.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/legacy.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/md.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/md.py old mode 100755 new mode 100644 index b55e95c..f3d6505 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/md.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/md.py @@ -314,7 +314,7 @@ def feed(self, character: str) -> None: self._buffer = "" self._buffer_accent_count = 0 elif ( - character not in {"<", ">", "-", "="} + character not in {"<", ">", "-", "=", "~", "|", "_"} and character.isdigit() is False and is_symbol(character) ): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/models.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/models.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/utils.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/version.py old mode 100755 new mode 100644 index 69bf050..77cfff2 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/version.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer/version.py @@ -2,5 +2,5 @@ Expose version """ -__version__ = "2.0.11" +__version__ = "2.0.12" VERSION = __version__.split(".") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/RECORD deleted file mode 100644 index ad19407..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/RECORD +++ /dev/null @@ -1,47 +0,0 @@ -cloudconnectlib-3.1.0b3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -cloudconnectlib-3.1.0b3.dist-info/METADATA,sha256=DyWRwB9ZlMrUQInKYnKz-QQFj9oUMdoz_zTBJuNarkQ,1035 -cloudconnectlib-3.1.0b3.dist-info/RECORD,, -cloudconnectlib-3.1.0b3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -cloudconnectlib-3.1.0b3.dist-info/WHEEL,sha256=V7iVckP-GYreevsTDnv1eAinQt_aArwnAxmnP0gygBY,83 -cloudconnectlib/__init__.py,sha256=LX0oPKTm3RdEe2ugCrqBoMIS9iQIS9lcBZ0hUqRObKY,773 -cloudconnectlib/client.py,sha256=KVdsm08ROyYSt0yFUcFrtxbsgEE7vzcpWleAtDSarSk,3180 -cloudconnectlib/common/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575 -cloudconnectlib/common/lib_util.py,sha256=4XEHpcFs2oOUylqa8KQp_90UQAXMCHIPz7VrwBjzsII,2115 -cloudconnectlib/common/log.py,sha256=3SF9F4IDjcpYaFrrMxgNNI4Yjja1AAFgtVzFGahMUpU,1896 -cloudconnectlib/common/util.py,sha256=cjb2ELvt4dxnCynxQxdvsLsMwBw7m22aT1NKRTRlccA,2058 -cloudconnectlib/configuration/__init__.py,sha256=vDmcGJjnxxkHIICiIOeK0FSJK_hxl3SJGHwdCDbYzMc,617 -cloudconnectlib/configuration/loader.py,sha256=6UlfhAKZnAOFe_zpkjP2KfYewOA_bR4CfMVpQvn3Oc4,10682 -cloudconnectlib/configuration/schema_1_0_0.json,sha256=WKWbZJAyWu-rt-OLwvNoMo_UqBi3iwxHbIcRy0aSRy0,11645 -cloudconnectlib/core/__init__.py,sha256=02DdxhQvHpl-ncbu3B3GY1qvoWVHhhpSyYP4Gez9bxU,665 -cloudconnectlib/core/cacerts/ca_certs_locater.py,sha256=PtvX2_8dERSUnANIOTISBT8-QSyf6SOdDSFHH6Ls-og,4889 
-cloudconnectlib/core/checkpoint.py,sha256=n45i3EPS8CNkJDIzmTQT0BNpmY8Fn6UNBdk76Wyacpo,1918
-cloudconnectlib/core/defaults.py,sha256=93J1wkfTVW4qVA0GjtDMGtPRhr8Uhd2_WZCdei5eOJI,1288
-cloudconnectlib/core/engine.py,sha256=nChmGutqzx0sfmd5-YaOfDNDXeujgeD5miSFTGNuV5U,10884
-cloudconnectlib/core/engine_v2.py,sha256=vXIhijoWY4p-tNYussNBvwFkIjXCaytdntf2yD_1jFg,4985
-cloudconnectlib/core/exceptions.py,sha256=GLDNVKann_M19kadClH7PLbcgDXaXRRowYUM4c7dWkY,1326
-cloudconnectlib/core/ext.py,sha256=CzhzOz8CFGKG4hbBci4vwmGBl3zI2qQDsr4AA9U_I9g,13104
-cloudconnectlib/core/http.py,sha256=5n2bRkVyHE-SZ2F_cZMCtG2j3eTdMaBLjaKV-bB1Erc,10577
-cloudconnectlib/core/job.py,sha256=FRUs9KQeF-tFFIlBgkdBRNr0uDBvVsY19xpzhVTNtz4,4880
-cloudconnectlib/core/models.py,sha256=bPEMXKCv_UxsWGOvr-ypCQr_ie6DQaQrrzqWBPlmmE8,10190
-cloudconnectlib/core/pipemgr.py,sha256=KHnwa0O1rpnpFAgRS4Xgq5t1pU2Llc7xk6yJDtbd19s,1037
-cloudconnectlib/core/plugin.py,sha256=mI1u7OOfcB04stG_sk5ANUT-KJc8MgINk4RgTIQqVKU,3667
-cloudconnectlib/core/task.py,sha256=MlO6ZMWv5fNB2fjDazgmxM5tJEpvM1iG53RwfpdEnUE,19669
-cloudconnectlib/core/template.py,sha256=ROHW-eR7FKeyht5_cDlHZt-3f6VM4y4DfCe_FnjEWuo,1152
-cloudconnectlib/splunktacollectorlib/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575
-cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py,sha256=EWttmxmSVLNrwleV65Xf_v-ts6R-Gk3nSYWWOQ7xgFA,2990
-cloudconnectlib/splunktacollectorlib/common/__init__.py,sha256=J2HhI2PL-5WtAl4FYqQBbV7ru86w7FsmVXyFJz9xPlc,1625
-cloudconnectlib/splunktacollectorlib/common/log.py,sha256=SaiYUrl7UmzfqxhmefR-eKSfJ5JHfm_9mN5JwgVd2WU,1974
-cloudconnectlib/splunktacollectorlib/common/rwlock.py,sha256=pBLwQ9u4jcz4K419XW7Lp21zj5QyVpVnXmzZERdEglk,2119
-cloudconnectlib/splunktacollectorlib/common/schema_meta.py,sha256=p3MMcXfrLWkCgfdg_vnPrHIyrAJaAqkDX0gDA1NV1Vk,803
-cloudconnectlib/splunktacollectorlib/config.py,sha256=naUrLtDgMIFQyVyfsqFAtQJIMKL8oQ7MxjkbuLSsplQ,14724
-cloudconnectlib/splunktacollectorlib/data_collection/__init__.py,sha256=OSP5_M7Nh4SvlJT246Qfn-LRRhsVzhhY948E7ffHAKo,596
-cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py,sha256=7awjOwGS2kWMn8OE4ki0uh1deF-j09-ZHXcJU6gcb_s,6376
-cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py,sha256=NKncmmObdJl-Nmd5DOWNS7DBgng3uiV2Xg7UCD1Z8jg,6859
-cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py,sha256=bvwnVB-1PJLK_FaLiaGpycpzQf4AmRujNvwy28OwQkI,1897
-cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py,sha256=209EqJwpeymsAVS7GE2ZwFyPVf1KU9M7yS8VlItaLcA,3345
-cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py,sha256=EI3CPiNWfMFJ8j-Hcq0_9rXE-_WndYVXEOVRdVH6L0Y,5967
-cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py,sha256=RrY2LqAXmqkDtWzYgja4o0ADEBzCkDZy0NV0OYXAQpE,5714
-cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py,sha256=KMFAPwE8liOPW4sUancUiYJV6wNlbIQ77Uf--EmJLW0,5924
-cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py,sha256=qV530k2jo1mzzolb7-EJcwx2VBq8asZwzWzUlMUFvpA,8474
-cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py,sha256=LR4encL187WnyNPp9vtVd6PAZxMUjNCriOEwpN2gR40,1133
-cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py,sha256=PgNr7HAeRz8l-B2HnBCHMdv_qAkXRflXAqkn9aTaXqo,2084
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/INSTALLER
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/INSTALLER
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/INSTALLER
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/LICENSE
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/LICENSE
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/LICENSE
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/METADATA
similarity index 65%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/METADATA
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/METADATA
index eef4cb7..21ffdb5 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.0b3.dist-info/METADATA
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: cloudconnectlib
-Version: 3.1.0b3
+Version: 3.1.3
 Summary: APP Cloud Connect
 License: Apache-2.0
 Author: Addon Factory template
@@ -14,15 +14,17 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.7
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.7
 Requires-Dist: PySocks (>=1.7.1,<2.0.0)
-Requires-Dist: decorator (>=4,<6)
-Requires-Dist: future
-Requires-Dist: httplib2 (>=0.19,<0.20)
+Requires-Dist: decorator (==5.1.1)
 Requires-Dist: jinja2 (>=2.10.1,<4.0.0)
 Requires-Dist: jsonpath-ng (>=1.5.2,<2.0.0)
-Requires-Dist: jsonschema (>=3.2.0,<4.0.0)
+Requires-Dist: jsonschema (>=4.4.0,<5.0.0)
 Requires-Dist: munch (>=2.3.2,<3.0.0)
-Requires-Dist: requests (>=2.24,<3.0)
-Requires-Dist: six
+Requires-Dist: requests (>=2.27.1,<3.0.0)
+Requires-Dist: solnlib (>=4.6.0,<5.0.0)
 Requires-Dist: splunk-sdk (>=1.6,<2.0)
-Requires-Dist: splunktaucclib (>=5,<6)
+Requires-Dist: splunktalib (==3.0.0)
+Requires-Dist: splunktaucclib (==6.0.0)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/RECORD
new file mode 100644
index 0000000..ffb2114
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/RECORD
@@ -0,0 +1,47 @@
+cloudconnectlib-3.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+cloudconnectlib-3.1.3.dist-info/LICENSE,sha256=Xvvd894DEl8lUHPEeFU-Ya18RW7Hc2IlPTZS_bE3Hgs,11341
+cloudconnectlib-3.1.3.dist-info/METADATA,sha256=b8QBy1ykc1bv6IlFv5cszhuuR6nqNCXOsr61YwA07k4,1188
+cloudconnectlib-3.1.3.dist-info/RECORD,,
+cloudconnectlib-3.1.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+cloudconnectlib-3.1.3.dist-info/WHEEL,sha256=vVCvjcmxuUltf8cYhJ0sJMRDLr1XsPuxEId8YDzbyCY,88
+cloudconnectlib/__init__.py,sha256=KPOnKSsmH-eAGJwZ3LQey4CMmJiCQOLsf8xvKjSiiUE,628
+cloudconnectlib/client.py,sha256=bCb28NRHRmDBkFLLa0eYK-CUsH8m4oU9jR00hQMShsc,3124
+cloudconnectlib/common/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575
+cloudconnectlib/common/lib_util.py,sha256=GqAdYWwzvpOXnbjY__ioyDILwhZC2LeOaHiKcl0s7I4,1669
+cloudconnectlib/common/log.py,sha256=HJ10AfdwC-hMYkCYkhW0W27FYScGavxMbu3mCUFfv_I,1788
+cloudconnectlib/common/util.py,sha256=p3tTSMAOAEkeT0WmUFNWgXTYspL_vvxsoPCHWX-jWck,1942
+cloudconnectlib/configuration/__init__.py,sha256=vDmcGJjnxxkHIICiIOeK0FSJK_hxl3SJGHwdCDbYzMc,617
+cloudconnectlib/configuration/loader.py,sha256=jgzTkjp-HsHRWdKYRR4dRcGcLa3Fv43sZAQgh-iSnro,10605
+cloudconnectlib/configuration/schema_1_0_0.json,sha256=WKWbZJAyWu-rt-OLwvNoMo_UqBi3iwxHbIcRy0aSRy0,11645
+cloudconnectlib/core/__init__.py,sha256=02DdxhQvHpl-ncbu3B3GY1qvoWVHhhpSyYP4Gez9bxU,665
+cloudconnectlib/core/checkpoint.py,sha256=YkpN8pUFb8Z2iScivQHHW9HN9RqlX7FuqqpqNcGfFK8,1816
+cloudconnectlib/core/defaults.py,sha256=VRwIIKxLGkDQiLB7c776y2rHsb0AmMi3F-hmzJKVdUw,1317
+cloudconnectlib/core/engine.py,sha256=Lt0RQR28YFa_X1cEpOH_LpT3hUcIzGe2lT7tHnw3KGA,10924
+cloudconnectlib/core/engine_v2.py,sha256=FTGYnoVSFUsuvuOh_0AwpsDRN4hxAGwYqdUqSQcUmok,5217
+cloudconnectlib/core/exceptions.py,sha256=pv35SgTgVc6ohLp7sopGYPeIMxcL8N1lWH0Tb6n6_wE,1314
+cloudconnectlib/core/ext.py,sha256=mn_ddquSQ6-6G7bKeSqeg5MYfjRWT0yuxpDY7eDjEVQ,12277
+cloudconnectlib/core/http.py,sha256=vLOnciuoEGt45Ygkrll56KxiSN2DX3tTiKVbiUpbdKw,10992
+cloudconnectlib/core/job.py,sha256=77nHgtyiAZBSvrQiEenb2oiIJ1tbzb3PCvL9AuzrtUc,4873
+cloudconnectlib/core/models.py,sha256=wYrknbeNN6861asiRCKR_5qMpAvHIaDvKE9bFzpW2cU,10002
+cloudconnectlib/core/pipemgr.py,sha256=KAOPjL6ktPQfo1f7pepsiVEOGzN6ut2SjvbyOIZRCIY,929
+cloudconnectlib/core/plugin.py,sha256=GNyOhpT8-HhgAMqG26D5pgE0WcFTkD7QJFUlLEDY6Pc,3592
+cloudconnectlib/core/task.py,sha256=HahqiR6WaS8A4AMBHv4Tss-CNq5HC0Z07l9XJHdqm_A,20996
+cloudconnectlib/core/template.py,sha256=lrnl-xVai45ngNMNg4nfOYYITs9mk16j5nEB9tR3ZpY,1153
+cloudconnectlib/splunktacollectorlib/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575
+cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py,sha256=5tKhEfXcG3OBaZPwFq9d2nuhVkUbjmJdAAg6H54O81A,2915
+cloudconnectlib/splunktacollectorlib/common/__init__.py,sha256=zUQIoODTieWPOAgrAKU0dP1SAS_c6IF1l1LEXO5LvfU,1624
+cloudconnectlib/splunktacollectorlib/common/log.py,sha256=Feu868c98qJYDG2XRZ36aPlm8mLwklVshBFp-2p_W88,1919
+cloudconnectlib/splunktacollectorlib/common/rwlock.py,sha256=PftXe5_OKBCHqueVBzSaAZteSbbJnRypqglxuitpTgo,2065
+cloudconnectlib/splunktacollectorlib/common/schema_meta.py,sha256=k3oKBoU7nvBXIJB5lWvIdykKPpvBV29zyvayd2bv2OQ,803
+cloudconnectlib/splunktacollectorlib/config.py,sha256=QfheX8Z_8HTlSYlqxRlPpLfgdGLMJNL0HZ_6pxdGC4c,14530
+cloudconnectlib/splunktacollectorlib/data_collection/__init__.py,sha256=KD_6X3x7TBHyyDAygJI-_evQkuvkDFtuHHxioYKC9C0,597
+cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py,sha256=V-QOELo14sNtv48MD_KceEaBhWwBwYdSQ2n2FE2Exs8,6324
+cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py,sha256=iQnPwlcNhW0WzdCDu3TPH8bqSZlMIRrhpHl6D38hbtU,6880
+cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py,sha256=9ns0y9TOzOzFIfTz0AVHS2Dw66bD1eeFXoH5xGb4-BQ,1897
+cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py,sha256=EVzIU1-mYqZ8kg31A_bHD0TVyd_4N7D2T5zWngPnBXk,2876
+cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py,sha256=BhCoFr39FWUDqo6itfcPQy8F9QGXqBV_zzp2KVpAUgA,5701
+cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py,sha256=5LCshOxw4I5esnSwZY-GSEP2Qfo7WwKc_7yU0x7Uyjk,5503
+cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py,sha256=khCUCJE4XjzXmqYmouyieN6uuFIhDD0D5j87U0ii2lY,5514
+cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py,sha256=W4Wr2aSA1aBuuqLZwRiGl9Dhb187dYxX15206QOEVNE,8355
+cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py,sha256=DH8W8j85sDqpVBSoT7uxlp3aqp_LMcG8pyPTblgrPnU,1132
+cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py,sha256=88pz8BNSgID2Z_6tmUeLU8wEaZ46GxUHfLi_gEjSc50,1755
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/REQUESTED
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/REQUESTED
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/REQUESTED
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/WHEEL
new file mode 100644
index 0000000..4ba7671
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib-3.1.3.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: poetry-core 1.4.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/__init__.py
index 1ba7634..5aa568b 100755
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/__init__.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/__init__.py
@@ -14,12 +14,7 @@
 # limitations under the License.
 #
 """
-APP Cloud Connect
+Cloud Connect library
 """
-import os
-from .common.lib_util import register_cacert_locater
-
-register_cacert_locater(os.path.join(os.path.dirname(__file__), 'core', 'cacerts'))
-
-__version__ = '2.0.2'
+__version__ = "3.1.3"
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/client.py
index 22c93a8..0383214 100755
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/client.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/client.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from builtins import object
 import copy
 import os.path
 import traceback
@@ -27,9 +26,8 @@
 _logger = get_cc_logger()
 
 
-class CloudConnectClient(object):
-    """The client of cloud connect used to start a cloud connect engine instance.
- """ +class CloudConnectClient: + """The client of cloud connect used to start a cloud connect engine instance.""" def __init__(self, context, config_file, checkpoint_mgr): """ @@ -52,19 +50,20 @@ def _load_config(self): conf = load_json_file(self._config_file) except: raise ConfigException( - 'Unable to load configuration file %s: %s' + "Unable to load configuration file %s: %s" % (self._config_file, traceback.format_exc()) ) - version = conf.get('meta', {'apiVersion', None}).get('apiVersion', None) + version = conf.get("meta", {"apiVersion", None}).get("apiVersion", None) if not version: raise ConfigException( - 'Config meta or api version not present in {}'.format( - self._config_file)) + f"Config meta or api version not present in {self._config_file}" + ) config_loader, schema_file = get_loader_by_version(version) schema_path = os.path.join( - os.path.dirname(__file__), 'configuration', schema_file) + os.path.dirname(__file__), "configuration", schema_file + ) return config_loader.load(conf, schema_path, self._context) @@ -80,14 +79,13 @@ def start(self): self._engine.start( context=copy.deepcopy(self._context), config=self._config, - checkpoint_mgr=self._checkpoint_mgr + checkpoint_mgr=self._checkpoint_mgr, ) except Exception as ex: - _logger.exception('Error while starting client') + _logger.exception("Error while starting client") raise ex def stop(self): - """Stop the current cloud connect engine. - """ + """Stop the current cloud connect engine.""" if self._engine: self._engine.stop() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/lib_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/lib_util.py index c1fee2e..8710e54 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/lib_util.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/lib_util.py @@ -20,8 +20,6 @@ import __main__ -from ..splunktacollectorlib.common import log as stulog - def get_main_file(): """Return the running mod input file""" @@ -36,34 +34,25 @@ def get_app_root_dir(): def get_mod_input_script_name(): """Return the name of running mod input""" script_name = os.path.basename(get_main_file()) - if script_name.lower().endswith('.py'): + if script_name.lower().endswith(".py"): script_name = script_name[:-3] return script_name def register_module(new_path): - """ register_module(new_path): adds a directory to sys.path. + """register_module(new_path): adds a directory to sys.path. Do nothing if it does not exist or if it's already in sys.path. """ if not os.path.exists(new_path): return new_path = os.path.abspath(new_path) - if platform.system() == 'Windows': + if platform.system() == "Windows": new_path = new_path.lower() for x in sys.path: x = os.path.abspath(x) - if platform.system() == 'Windows': + if platform.system() == "Windows": x = x.lower() if new_path in (x, x + os.sep): return sys.path.insert(0, new_path) - - -def register_cacert_locater(cacerts_locater_path): - for x in sys.modules: - if (x == "httplib2" or x.endswith(".httplib2")) and sys.modules[x] \ - is not None: - stulog.logger.warning("Httplib2 module '{}' is already installed. 
" - "The ca_certs_locater may not work".format(x)) - register_module(cacerts_locater_path) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/log.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/log.py index 907d56d..50a2462 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/log.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/log.py @@ -16,20 +16,20 @@ import logging from solnlib.pattern import Singleton + from ..splunktacollectorlib.common import log as stulog from ..splunktacollectorlib.data_collection import ta_helper as th from .lib_util import get_mod_input_script_name -from future.utils import with_metaclass -class CloudClientLogAdapter(with_metaclass(Singleton, logging.LoggerAdapter)): +class CloudClientLogAdapter(logging.LoggerAdapter, metaclass=Singleton): def __init__(self, logger=None, extra=None, prefix=""): - super(CloudClientLogAdapter, self).__init__(logger, extra) + super().__init__(logger, extra) self.cc_prefix = prefix if prefix else "" def process(self, msg, kwargs): - msg = "{} {}".format(self.cc_prefix, msg) - return super(CloudClientLogAdapter, self).process(msg, kwargs) + msg = f"{self.cc_prefix} {msg}" + return super().process(msg, kwargs) def set_level(self, val): self.logger.setLevel(val) @@ -38,20 +38,20 @@ def set_level(self, val): _adapter = CloudClientLogAdapter(stulog.logger) -def set_cc_logger(logger, logger_prefix=''): +def set_cc_logger(logger, logger_prefix=""): global _adapter _adapter.logger = logger - _adapter.cc_prefix = logger_prefix or '' + _adapter.cc_prefix = logger_prefix or "" def get_cc_logger(): return _adapter -def reset_cc_logger(stanza_name, logging_level, logger_prefix=''): +def reset_cc_logger(stanza_name, logging_level, logger_prefix=""): script_name = get_mod_input_script_name() logger_name = script_name + "_" + th.format_name_for_file(stanza_name) stulog.reset_logger(logger_name) stulog.set_log_level(logging_level) set_cc_logger(stulog.logger, logger_prefix) - return get_cc_logger() \ No newline at end of file + return get_cc_logger() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/util.py index e072ac7..1e188ae 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/util.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/common/util.py @@ -14,7 +14,8 @@ # limitations under the License. # import json -from splunktalib.common import util + +from solnlib import utils from solnlib.modular_input.event import XMLEvent @@ -23,11 +24,11 @@ def is_valid_bool(val): :param val: value as string. :return: `True` if value can be convert to bool else `False`. """ - return util.is_true(val) or util.is_false(val) + return utils.is_true(val) or utils.is_false(val) def is_true(val): - return util.is_true(val) + return utils.is_true(val) def is_valid_port(port): @@ -47,17 +48,32 @@ def load_json_file(file_path): :param file_path: JSON file path. :return: A `dict` object. 
""" - with open(file_path, 'r') as file_pointer: + with open(file_path) as file_pointer: return json.load(file_pointer) -def format_events(raw_events, time=None, - index=None, host=None, source=None, sourcetype=None, - stanza=None, unbroken=False, done=False): - return XMLEvent.format_events(XMLEvent(data, time=time, - index=index, host=host, - source=source, - sourcetype=sourcetype, - stanza=stanza, unbroken=unbroken, - done=done) for data in - raw_events) +def format_events( + raw_events, + time=None, + index=None, + host=None, + source=None, + sourcetype=None, + stanza=None, + unbroken=False, + done=False, +): + return XMLEvent.format_events( + XMLEvent( + data, + time=time, + index=index, + host=host, + source=source, + sourcetype=sourcetype, + stanza=stanza, + unbroken=unbroken, + done=done, + ) + for data in raw_events + ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/configuration/loader.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/configuration/loader.py index 1eb6470..c9ac7a6 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/configuration/loader.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/configuration/loader.py @@ -13,62 +13,62 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from builtins import str -from builtins import object import logging import re import traceback from abc import abstractmethod -import six -from jsonschema import validate, ValidationError +from jsonschema import ValidationError, validate from munch import munchify + from ..common.log import get_cc_logger -from ..common.util import ( - load_json_file, is_valid_bool, is_valid_port, is_true -) +from ..common.util import is_true, is_valid_bool, is_valid_port, load_json_file from ..core.exceptions import ConfigException from ..core.ext import lookup_method from ..core.models import ( - BasicAuthorization, RequestParams, Processor, - Condition, Task, Checkpoint, IterationMode, - DictToken + BasicAuthorization, + Checkpoint, + Condition, + DictToken, + IterationMode, + Processor, + RequestParams, + Task, ) _logger = get_cc_logger() -_PROXY_TYPES = ['http', 'socks4', 'socks5',] -_AUTH_TYPES = { - 'basic_auth': BasicAuthorization -} +_PROXY_TYPES = ["http", "socks4", "socks5", "http_no_tunnel"] +_AUTH_TYPES = {"basic_auth": BasicAuthorization} _LOGGING_LEVELS = { - 'DEBUG': logging.DEBUG, - 'INFO': logging.INFO, - 'WARNING': logging.WARNING, - 'ERROR': logging.ERROR, - 'FATAL': logging.FATAL, - 'CRITICAL': logging.CRITICAL + "DEBUG": logging.DEBUG, + "INFO": logging.INFO, + "WARNING": logging.WARNING, + "ERROR": logging.ERROR, + "FATAL": logging.FATAL, + "CRITICAL": logging.CRITICAL, } # FIXME Make this configurable -_DEFAULT_LOG_LEVEL = 'INFO' +_DEFAULT_LOG_LEVEL = "INFO" -class CloudConnectConfigLoader(object): +class CloudConnectConfigLoader: """The Base cloud connect configuration loader""" @staticmethod def _get_schema_from_file(schema_file): - """ Load JSON based schema definition from schema file path. + """Load JSON based schema definition from schema file path. :return: A `dict` contains schema. 
""" try: return load_json_file(schema_file) except: raise ConfigException( - 'Cannot load schema from file {}: {}'.format( - schema_file, traceback.format_exc()) + "Cannot load schema from file {}: {}".format( + schema_file, traceback.format_exc() + ) ) @abstractmethod @@ -81,8 +81,7 @@ class CloudConnectConfigLoaderV1(CloudConnectConfigLoader): def _render_from_dict(source, ctx): rendered = DictToken(source).render(ctx) - return dict((k, v.strip() if isinstance(v, six.string_types) else v) - for k, v in rendered.items()) + return {k: v.strip() if isinstance(v, str) else v for k, v in rendered.items()} def _load_proxy(self, candidate, variables): """ @@ -96,15 +95,13 @@ def _load_proxy(self, candidate, variables): proxy = self._render_from_dict(candidate, variables) - enabled = proxy.get('enabled', '0') + enabled = proxy.get("enabled", "0") if not is_valid_bool(enabled): - raise ValueError( - 'Proxy "enabled" expect to be bool type: {}'.format(enabled) - ) + raise ValueError(f'Proxy "enabled" expect to be bool type: {enabled}') - proxy['enabled'] = is_true(enabled) + proxy["enabled"] = is_true(enabled) - host, port = proxy.get('host'), proxy.get('port') + host, port = proxy.get("host"), proxy.get("port") if host or port: if not host: @@ -116,24 +113,23 @@ def _load_proxy(self, candidate, variables): ) # proxy type default to 'http' - proxy_type = proxy.get('type') - proxy_type = proxy_type.lower() if proxy_type else 'http' + proxy_type = proxy.get("type") + proxy_type = proxy_type.lower() if proxy_type else "http" if proxy_type not in _PROXY_TYPES: raise ValueError( 'Proxy "type" expect to be one of [{}]: {}'.format( - ','.join(_PROXY_TYPES), proxy_type) + ",".join(_PROXY_TYPES), proxy_type + ) ) else: - proxy['type'] = proxy_type + proxy["type"] = proxy_type # proxy rdns default to '0' - proxy_rdns = proxy.get('rdns', '0') + proxy_rdns = proxy.get("rdns", "0") if not is_valid_bool(proxy_rdns): - raise ValueError( - 'Proxy "rdns" expect to be bool type: {}'.format(proxy_rdns) - ) + raise ValueError(f'Proxy "rdns" expect to be bool type: {proxy_rdns}') else: - proxy['rdns'] = is_true(proxy_rdns) + proxy["rdns"] = is_true(proxy_rdns) return proxy @@ -148,7 +144,8 @@ def _get_log_level(level_name): _logger.warning( 'The log level "%s" is invalid, set it to default: "%s"', - level_name, _DEFAULT_LOG_LEVEL + level_name, + _DEFAULT_LOG_LEVEL, ) return _LOGGING_LEVELS[_DEFAULT_LOG_LEVEL] @@ -156,7 +153,7 @@ def _get_log_level(level_name): def _load_logging(self, log_setting, variables): logger = self._render_from_dict(log_setting, variables) - logger['level'] = self._get_log_level(logger.get('level')) + logger["level"] = self._get_log_level(logger.get("level")) return logger @@ -168,98 +165,94 @@ def _load_global_setting(self, candidate, variables): :return: A `Munch` object """ candidate = candidate or {} - proxy_setting = self._load_proxy(candidate.get('proxy'), variables) - log_setting = self._load_logging(candidate.get('logging'), variables) + proxy_setting = self._load_proxy(candidate.get("proxy"), variables) + log_setting = self._load_logging(candidate.get("logging"), variables) - return munchify({'proxy': proxy_setting, 'logging': log_setting}) + return munchify({"proxy": proxy_setting, "logging": log_setting}) @staticmethod def _load_authorization(candidate): if candidate is None: return None - auth_type = candidate['type'].lower() + auth_type = candidate["type"].lower() if auth_type not in _AUTH_TYPES: raise ValueError( - 'Auth type expect to be one of [{}]: {}'.format( - 
','.join(list(_AUTH_TYPES.keys())), auth_type) + "Auth type expect to be one of [{}]: {}".format( + ",".join(list(_AUTH_TYPES.keys())), auth_type + ) ) - return _AUTH_TYPES[auth_type](candidate['options']) + return _AUTH_TYPES[auth_type](candidate["options"]) def _load_options(self, options): return RequestParams( - auth=self._load_authorization(options.get('auth')), - url=options['url'], - method=options.get('method', 'GET'), - header=options.get('headers', {}), - body=options.get('body', {}) + auth=self._load_authorization(options.get("auth")), + url=options["url"], + method=options.get("method", "GET"), + header=options.get("headers", {}), + body=options.get("body", {}), ) @staticmethod def _validate_method(method): if lookup_method(method) is None: - raise ValueError('Unimplemented method: {}'.format(method)) + raise ValueError(f"Unimplemented method: {method}") def _parse_tasks(self, raw_tasks): tasks = [] for item in raw_tasks: - self._validate_method(item['method']) - tasks.append(Task(item['input'], item['method'], item.get('output'))) + self._validate_method(item["method"]) + tasks.append(Task(item["input"], item["method"], item.get("output"))) return tasks def _parse_conditions(self, raw_conditions): conditions = [] for item in raw_conditions: - self._validate_method(item['method']) - conditions.append(Condition(item['input'], item['method'])) + self._validate_method(item["method"]) + conditions.append(Condition(item["input"], item["method"])) return conditions @staticmethod def _load_checkpoint(checkpoint): if not checkpoint: return None - return Checkpoint( - checkpoint.get('namespace', []), checkpoint['content']) + return Checkpoint(checkpoint.get("namespace", []), checkpoint["content"]) def _load_iteration_mode(self, iteration_mode): - count = iteration_mode.get('iteration_count', '0') + count = iteration_mode.get("iteration_count", "0") try: iteration_count = int(count) except ValueError: - raise ValueError( - '"iteration_count" must be an integer: %s' % count) + raise ValueError('"iteration_count" must be an integer: %s' % count) - stop_conditions = self._parse_conditions( - iteration_mode['stop_conditions']) + stop_conditions = self._parse_conditions(iteration_mode["stop_conditions"]) - return IterationMode(iteration_count=iteration_count, - conditions=stop_conditions) + return IterationMode( + iteration_count=iteration_count, conditions=stop_conditions + ) def _load_processor(self, processor): - skip_conditions = self._parse_conditions( - processor.get('skip_conditions', []) - ) - pipeline = self._parse_tasks(processor.get('pipeline', [])) - return Processor( - skip_conditions=skip_conditions, - pipeline=pipeline - ) + skip_conditions = self._parse_conditions(processor.get("skip_conditions", [])) + pipeline = self._parse_tasks(processor.get("pipeline", [])) + return Processor(skip_conditions=skip_conditions, pipeline=pipeline) def _load_request(self, request): - options = self._load_options(request['request']) - - pre_process = self._load_processor(request.get('pre_process', {})) - post_process = self._load_processor(request['post_process']) - checkpoint = self._load_checkpoint(request.get('checkpoint')) - iteration_mode = self._load_iteration_mode(request['iteration_mode']) - - return munchify({ - 'request': options, - 'pre_process': pre_process, - 'post_process': post_process, - 'checkpoint': checkpoint, - 'iteration_mode': iteration_mode, - }) + options = self._load_options(request["request"]) + + pre_process = self._load_processor(request.get("pre_process", {})) + 
post_process = self._load_processor(request["post_process"]) + checkpoint = self._load_checkpoint(request.get("checkpoint")) + iteration_mode = self._load_iteration_mode(request["iteration_mode"]) + + return munchify( + { + "request": options, + "pre_process": pre_process, + "post_process": post_process, + "checkpoint": checkpoint, + "iteration_mode": iteration_mode, + } + ) def load(self, definition, schema_file, context): """Load cloud connect configuration from a `dict` and validate @@ -273,35 +266,39 @@ def load(self, definition, schema_file, context): validate(definition, self._get_schema_from_file(schema_file)) except ValidationError: raise ConfigException( - 'Failed to validate interface with schema: {}'.format( - traceback.format_exc())) + "Failed to validate interface with schema: {}".format( + traceback.format_exc() + ) + ) try: global_settings = self._load_global_setting( - definition.get('global_settings'), context + definition.get("global_settings"), context ) - requests = [self._load_request(item) for item in definition['requests']] + requests = [self._load_request(item) for item in definition["requests"]] - return munchify({ - 'meta': munchify(definition['meta']), - 'tokens': definition['tokens'], - 'global_settings': global_settings, - 'requests': requests, - }) + return munchify( + { + "meta": munchify(definition["meta"]), + "tokens": definition["tokens"], + "global_settings": global_settings, + "requests": requests, + } + ) except Exception as ex: - error = 'Unable to load configuration: %s' % str(ex) + error = "Unable to load configuration: %s" % str(ex) _logger.exception(error) raise ConfigException(error) _loader_and_schema_by_version = { - r'1\.0\.0': (CloudConnectConfigLoaderV1, 'schema_1_0_0.json'), + r"1\.0\.0": (CloudConnectConfigLoaderV1, "schema_1_0_0.json"), } def get_loader_by_version(version): - """ Instantiate a configuration loader on basis of a given version. + """Instantiate a configuration loader on basis of a given version. A `ConfigException` will raised if the version is not supported. :param version: Version to lookup config loader. :return: A config loader. @@ -312,7 +309,8 @@ def get_loader_by_version(version): return loader_cls(), schema raise ConfigException( - 'Unsupported schema version {}, current supported' - ' versions should match these regex [{}]'.format(version, ','.join( - _loader_and_schema_by_version)) + "Unsupported schema version {}, current supported" + " versions should match these regex [{}]".format( + version, ",".join(_loader_and_schema_by_version) + ) ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/configuration/schema_1_0_0.json b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/configuration/schema_1_0_0.json old mode 100644 new mode 100755 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py deleted file mode 100755 index 44040b2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/cacerts/ca_certs_locater.py +++ /dev/null @@ -1,145 +0,0 @@ -# -# Copyright 2021 Splunk Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-"""
-`ca_certs_locater` is a lib for extending httplib2 to allow system certificate store to be used when
-verifying SSL certificates, to enable this lib, you should add it to your python import path before
-initializing httplib2. As we're not trying to implement SSL certificate RFCs, parsing and validating
-certificates are not included.
-"""
-
-import atexit
-import os
-import os.path as op
-import ssl
-import sys
-
-TEMP_CERT_FILE_NAME = 'httplib2_merged_certificates_{}.crt'
-LINUX_CERT_PATH_1 = '/etc/pki/tls/certs/ca-bundle.crt'  # RedHat
-LINUX_CERT_PATH_2 = '/etc/ssl/certs/ca-certificates.crt'  # Debian
-DARWIN_CERT_PATH = '/usr/local/etc/openssl/cert.pem'
-HTTPLIB2_CA_CERT_FILE_NAME = 'cacerts.txt'
-
-TEMP_CERT_FILE_PATH = None
-
-
-def get():
-    """
-    Returns: a path to generated certificate authority file
-
-    """
-    try:
-        return _get()
-    except (IOError, OSError, ssl.SSLError):
-        _fallback()  # IO and SSL relative errors should be swallowed to protect the HTTP request
-
-
-def _get():
-    global TEMP_CERT_FILE_PATH
-    # also check file existence as it's possible for the temp file to be deleted
-    if TEMP_CERT_FILE_PATH is None or not os.path.exists(TEMP_CERT_FILE_PATH):
-        temp_cert_file_path = _generate_temp_cert_file_name()
-        ssl_ca_certs = _read_ssl_default_ca_certs()
-        if not ssl_ca_certs:
-            # it's possible the ca load path is not well configured, try some typical paths
-            ssl_ca_certs = _read_platform_pem_cert_file()
-
-        if ssl_ca_certs:  # only update temp cert file when there's additional PEM certs found
-            cert_files = [ssl_ca_certs, _read_httplib2_default_certs()]
-            _update_temp_cert_file(temp_cert_file_path, cert_files)
-            TEMP_CERT_FILE_PATH = temp_cert_file_path
-        else:
-            _fallback()
-
-    return TEMP_CERT_FILE_PATH
-
-
-def _fallback():
-    """
-    Give up the loading process by throwing specified exception, httplib2 will then use its
-    bundled certificates
-    """
-    raise ImportError('Unable to load system certificate authority files')
-
-
-def _read_platform_pem_cert_file():
-    if sys.platform.startswith('linux'):
-        pem_files = [_read_pem_file(LINUX_CERT_PATH_1), _read_pem_file(LINUX_CERT_PATH_2)]
-        return '\n'.join([_f for _f in pem_files if _f])
-    elif sys.platform.startswith('darwin'):
-        return _read_pem_file(DARWIN_CERT_PATH)
-    else:
-        return ""
-
-
-def _read_ssl_default_ca_certs():
-    # it's not guaranteed to return PEM formatted certs when `binary_form` is False
-    der_certs = ssl.create_default_context().get_ca_certs(binary_form=True)
-    pem_certs = [ssl.DER_cert_to_PEM_cert(der_cert_bytes) for der_cert_bytes in der_certs]
-    return '\n'.join(pem_certs)
-
-
-def _read_httplib2_default_certs():
-    import httplib2  # import error should not happen here, and will be well handled by outer called
-    httplib_dir = os.path.dirname(os.path.abspath(httplib2.__file__))
-    ca_certs_path = os.path.join(httplib_dir, HTTPLIB2_CA_CERT_FILE_NAME)
-    return _read_pem_file(ca_certs_path)
-
-
-def _read_pem_file(path):
-    if os.path.exists(path):
-        with open(path, mode='r') as pem_file:
-            return pem_file.read()
-    else:
-        return ""
-
-
-def _update_temp_cert_file(temp_file, pem_texts):
-    with open(temp_file, mode='w') as temp_cert_file:
-        for pem_text in pem_texts:
-            if len(pem_text) > 0:
-                temp_cert_file.write(pem_text + '\n')
-        temp_cert_file.flush()
-    atexit.register(_do_safe_remove, temp_file)
-
-
-def _do_safe_remove(file_path):
-    if os.path.exists(file_path):
-        try:
-            os.remove(file_path)
-        except:
-            pass
-
-
-def _get_temp_cert_file_dir():
-    import __main__
-    app_root = op.dirname(op.dirname(op.abspath(__main__.__file__)))
-
-    temp_dir = op.join(app_root, 'temp_certs')
-    if not op.isdir(temp_dir):
-        try:
-            os.mkdir(temp_dir)
-        except:
-            pass
-    for candidate in ['temp_certs', 'local', 'default']:
-        dir_path = op.join(app_root, candidate)
-        if op.isdir(dir_path):
-            return dir_path
-    return app_root
-
-
-def _generate_temp_cert_file_name():
-    file_name = TEMP_CERT_FILE_NAME.format(os.getpid())
-    return os.path.join(_get_temp_cert_file_dir(), file_name)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/checkpoint.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/checkpoint.py
old mode 100755
new mode 100644
index 6aacd2c..6e8b8b9
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/checkpoint.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/checkpoint.py
@@ -15,7 +15,7 @@
 #
 import cloudconnectlib.splunktacollectorlib.data_collection.ta_checkpoint_manager as tacm
 from cloudconnectlib.common.log import get_cc_logger
-from cloudconnectlib.core.models import _Token, DictToken
+from cloudconnectlib.core.models import DictToken, _Token
 
 logger = get_cc_logger()
@@ -24,7 +24,7 @@ class CheckpointManagerAdapter(tacm.TACheckPointMgr):
     """Wrap TACheckPointMgr for custom usage"""
 
     def __init__(self, namespaces, content, meta_config, task_config):
-        super(CheckpointManagerAdapter, self).__init__(meta_config, task_config)
+        super().__init__(meta_config, task_config)
         if isinstance(namespaces, (list, tuple)):
             self.namespaces = (_Token(t) for t in namespaces)
         else:
@@ -36,16 +36,15 @@ def _namespaces_for(self, ctx):
 
     def save(self, ctx):
         """Save checkpoint"""
-        super(CheckpointManagerAdapter, self).update_ckpt(
-            ckpt=self.content.render(ctx),
-            namespaces=self._namespaces_for(ctx)
+        super().update_ckpt(
+            ckpt=self.content.render(ctx), namespaces=self._namespaces_for(ctx)
         )
 
     def load(self, ctx):
         """Load checkpoint"""
         namespaces = self._namespaces_for(ctx)
-        checkpoint = super(CheckpointManagerAdapter, self).get_ckpt(namespaces)
+        checkpoint = super().get_ckpt(namespaces)
         if checkpoint is None:
-            logger.info('No existing checkpoint found')
+            logger.info("No existing checkpoint found")
             checkpoint = {}
         return checkpoint
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/defaults.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/defaults.py
index 2144e86..52710e5 100755
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/defaults.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/defaults.py
@@ -22,14 +22,29 @@
 success_statuses = (200, 201)  # statuses be treated as success.
 
 # response status which need to retry.
-retry_statuses = (429, 500, 501, 502, 503, 504, 505, 506, 507,
-                  509, 510, 511)
+retry_statuses = (429, 500, 501, 502, 503, 504, 505, 506, 507, 509, 510, 511)
 
 # response status which need print a warning log.
-warning_statuses = (203, 204, 205, 206, 207, 208, 226,
-                    300, 301, 302, 303, 304, 305, 306, 307, 308)
+warning_statuses = (
+    203,
+    204,
+    205,
+    206,
+    207,
+    208,
+    226,
+    300,
+    301,
+    302,
+    303,
+    304,
+    305,
+    306,
+    307,
+    308,
+)
 
 retries = 3  # Default maximum retry times.
 
 max_iteration_count = 100  # maximum iteration loop count
 
-charset = 'utf-8'  # Default response charset if not found in response header
+charset = "utf-8"  # Default response charset if not found in response header
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine.py
index 5e1da2d..4197451 100755
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine.py
@@ -13,20 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from builtins import object
 import threading
 
+from ..common.log import get_cc_logger
 from . import defaults
 from .exceptions import HTTPError, StopCCEIteration
 from .http import HttpClient
-from ..common.log import get_cc_logger
 
 _logger = get_cc_logger()
 
 
-class CloudConnectEngine(object):
+class CloudConnectEngine:
     """The cloud connect engine to process request instantiated
-       from user options."""
+    from user options."""
 
     def __init__(self):
         self._stopped = False
@@ -38,17 +37,17 @@ def _set_logging(log_setting):
 
     def start(self, context, config, checkpoint_mgr):
         """Start current client instance to execute each request parsed
-           from config.
+        from config.
         """
         if not config:
-            raise ValueError('Config must not be empty')
+            raise ValueError("Config must not be empty")
 
         context = context or {}
         global_setting = config.global_settings
 
         CloudConnectEngine._set_logging(global_setting.logging)
 
-        _logger.info('Start to execute requests jobs.')
+        _logger.info("Start to execute requests jobs.")
         processed = 0
 
         for request in config.requests:
@@ -62,34 +61,33 @@ def start(self, context, config, checkpoint_mgr):
                 job.run()
 
                 processed += 1
-                _logger.info('%s job(s) process finished', processed)
+                _logger.info("%s job(s) process finished", processed)
 
                 if self._stopped:
-                    _logger.info(
-                        'Engine has been stopped, stopping to execute jobs.')
+                    _logger.info("Engine has been stopped, stopping to execute jobs.")
                     break
 
         self._stopped = True
-        _logger.info('Engine executing finished')
+        _logger.info("Engine executing finished")
 
     def stop(self):
         """Stops engine and running job. Do nothing if engine already
         been stopped."""
         if self._stopped:
-            _logger.info('Engine already stopped, do nothing.')
+            _logger.info("Engine already stopped, do nothing.")
             return
 
-        _logger.info('Stopping engine')
+        _logger.info("Stopping engine")
 
         if self._running_job:
-            _logger.info('Attempting to stop the running job.')
+            _logger.info("Attempting to stop the running job.")
             self._running_job.terminate()
-            _logger.info('Stopping job finished.')
+            _logger.info("Stopping job finished.")
 
         self._stopped = True
 
 
-class Job(object):
+class Job:
     """Job class represents a single request to send HTTP request until
     reached it's stop condition.
""" @@ -121,27 +119,30 @@ def __init__(self, request, context, checkpoint_mgr, proxy=None): def _get_max_iteration_count(self): mode_max_count = self._iteration_mode.iteration_count default_max_count = defaults.max_iteration_count - return min(default_max_count, mode_max_count) \ - if mode_max_count > 0 else default_max_count + return ( + min(default_max_count, mode_max_count) + if mode_max_count > 0 + else default_max_count + ) def terminate(self, block=True, timeout=30): """Terminate this job, the current thread will blocked util - the job is terminate finished if block is True """ + the job is terminate finished if block is True""" if self.is_stopped(): - _logger.info('Job already been stopped.') + _logger.info("Job already been stopped.") return if self._running_thread == threading.current_thread(): - _logger.warning('Job cannot terminate itself.') + _logger.warning("Job cannot terminate itself.") return - _logger.info('Stopping job') + _logger.info("Stopping job") self._should_stop = True if not block: return if not self._terminated.wait(timeout): - _logger.warning('Terminating job timeout.') + _logger.warning("Terminating job timeout.") def _set_context(self, key, value): self._context[key] = value @@ -161,12 +162,11 @@ def _on_pre_process(self): pre_processor = self._request.pre_process if pre_processor.should_skipped(self._context): - _logger.info('Skip pre process condition satisfied, do nothing') + _logger.info("Skip pre process condition satisfied, do nothing") return tasks = pre_processor.pipeline - _logger.debug( - 'Got %s tasks need be executed before process', len(tasks)) + _logger.debug("Got %s tasks need be executed before process", len(tasks)) self._execute_tasks(tasks) def _on_post_process(self): @@ -176,21 +176,18 @@ def _on_post_process(self): post_processor = self._request.post_process if post_processor.should_skipped(self._context): - _logger.info('Skip post process condition satisfied, ' - 'do nothing') + _logger.info("Skip post process condition satisfied, " "do nothing") return tasks = post_processor.pipeline - _logger.debug( - 'Got %s tasks need to be executed after process', len(tasks) - ) + _logger.debug("Got %s tasks need to be executed after process", len(tasks)) self._execute_tasks(tasks) def _update_checkpoint(self): """Updates checkpoint based on checkpoint namespace and content.""" checkpoint = self._request.checkpoint if not checkpoint: - _logger.info('Checkpoint not specified, do not update it.') + _logger.info("Checkpoint not specified, do not update it.") return self._checkpoint_mgr.update_ckpt( @@ -201,7 +198,7 @@ def _update_checkpoint(self): def _get_checkpoint(self): checkpoint = self._request.checkpoint if not checkpoint: - _logger.info('Checkpoint not specified, do not read it.') + _logger.info("Checkpoint not specified, do not read it.") return namespaces = checkpoint.normalize_namespace(self._context) @@ -213,14 +210,15 @@ def _is_stoppable(self): """Check if repeat mode conditions satisfied.""" if self._request_iterated_count >= self._max_iteration_count: _logger.info( - 'Job iteration count is %s, current request count is %s,' - ' stop condition satisfied.', - self._max_iteration_count, self._request_iterated_count + "Job iteration count is %s, current request count is %s," + " stop condition satisfied.", + self._max_iteration_count, + self._request_iterated_count, ) return True if self._iteration_mode.passed(self._context): - _logger.info('Job stop condition satisfied.') + _logger.info("Job stop condition satisfied.") return True return 
False @@ -230,25 +228,25 @@ def is_stopped(self): return self._stopped def run(self): - """Start job and exit util meet stop condition. """ - _logger.info('Start to process job') + """Start job and exit util meet stop condition.""" + _logger.info("Start to process job") self._stopped = False try: self._running_thread = threading.current_thread() self._run() except Exception: - _logger.exception('Error encountered while running job.') + _logger.exception("Error encountered while running job.") raise finally: self._terminated.set() self._stopped = True - _logger.info('Job processing finished') + _logger.info("Job processing finished") def _check_should_stop(self): if self._should_stop: - _logger.info('Job should been stopped.') + _logger.info("Job should been stopped.") return self._should_stop def _run(self): @@ -263,7 +261,9 @@ def _run(self): try: self._on_pre_process() except StopCCEIteration: - _logger.info('Stop iteration command in pre process is received, exit job now.') + _logger.info( + "Stop iteration command in pre process is received, exit job now." + ) return r = request.render(self._context) @@ -277,18 +277,20 @@ def _run(self): response, need_terminate = self._send_request(r) if need_terminate: - _logger.info('This job need to be terminated.') + _logger.info("This job need to be terminated.") break self._request_iterated_count += 1 - self._set_context('__response__', response) + self._set_context("__response__", response) if self._check_should_stop(): return try: self._on_post_process() except StopCCEIteration: - _logger.info('Stop iteration command in post process is received, exit job now.') + _logger.info( + "Stop iteration command in post process is received, exit job now." + ) return if self._check_should_stop(): @@ -296,7 +298,7 @@ def _run(self): self._update_checkpoint() if self._is_stoppable(): - _logger.info('Stop condition reached, exit job now') + _logger.info("Stop condition reached, exit job now") break def _send_request(self, request): @@ -306,26 +308,30 @@ def _send_request(self, request): response = self._client.send(request) except HTTPError as error: _logger.exception( - 'HTTPError reason=%s when sending request to ' - 'url=%s method=%s', error.reason, request.url, request.method) + "HTTPError reason=%s when sending request to " "url=%s method=%s", + error.reason, + request.url, + request.method, + ) return None, True status = response.status_code if status in defaults.success_statuses: - if not (response.body or '').strip(): + if not (response.body or "").strip(): _logger.info( - 'The response body of request which url=%s and' - ' method=%s is empty, status=%s.', - request.url, request.method, status + "The response body of request which url=%s and" + " method=%s is empty, status=%s.", + request.url, + request.method, + status, ) return None, True return response, False - error_log = ('The response status=%s for request which url=%s and' - ' method=%s.') % ( - status, request.url, request.method - ) + error_log = ( + "The response status=%s for request which url=%s and" " method=%s." 
+ ) % (status, request.url, request.method) if status in defaults.warning_statuses: _logger.warning(error_log) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine_v2.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine_v2.py old mode 100755 new mode 100644 index a777d7a..250ade6 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine_v2.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/engine_v2.py @@ -13,27 +13,32 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from builtins import object import concurrent.futures as cf import threading from collections import Iterable -from ..common.log import get_cc_logger from os import path as op + +from ..common.log import get_cc_logger from .plugin import init_pipeline_plugins -logger = get_cc_logger() +logger = get_cc_logger() -class CloudConnectEngine(object): - def __init__(self, max_workers=4): +class CloudConnectEngine: + def __init__(self, max_workers=4, plugin_dir=""): + """ + Initialize CloudConnectEngine object + :param max_workers: maximum number of Threads to execute the given calls + :param plugin_dir: Absolute path of directory containing cce_plugin_*.py + """ self._executor = cf.ThreadPoolExecutor(max_workers) self._pending_job_results = set() self._shutdown = False self._pending_jobs = [] self._counter = 0 self._lock = threading.RLock() - init_pipeline_plugins( - op.join(op.dirname(op.dirname(__file__)), "plugin")) + plugin_dir = plugin_dir or op.join(op.dirname(op.dirname(__file__)), "plugin") + init_pipeline_plugins(plugin_dir) def start(self, jobs=None): """ @@ -54,8 +59,9 @@ def start(self, jobs=None): break # check the intermediate results to find the done jobs and not # done jobs - done_and_not_done_jobs = cf.wait(self._pending_job_results, - return_when=cf.FIRST_COMPLETED) + done_and_not_done_jobs = cf.wait( + self._pending_job_results, return_when=cf.FIRST_COMPLETED + ) self._pending_job_results = done_and_not_done_jobs.not_done done_job_results = done_and_not_done_jobs.done for future in done_job_results: @@ -68,7 +74,7 @@ def start(self, jobs=None): self._add_job(temp) else: self._add_job(result) - except: + except Exception: logger.exception("CloudConnectEngine encountered exception") finally: self._teardown() @@ -86,8 +92,7 @@ def _add_job(self, job): result = self._executor.submit(self._invoke_job, job) self._pending_job_results.add(result) self._counter += 1 - logger.debug("%s job(s) have been added to the engine now", - self._counter) + logger.debug("%s job(s) have been added to the engine now", self._counter) return True def _invoke_job(self, job): @@ -102,7 +107,7 @@ def _invoke_job(self, job): return None invoke_result = job.run() return invoke_result - except: + except Exception: logger.exception("job %s is invoked with exception", job) return None finally: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/exceptions.py index 8738682..3b0ccf7 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/exceptions.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/exceptions.py @@ -22,32 +22,36 @@ class CCEError(Exception): class 
ConfigException(CCEError): """Config exception""" + pass class FuncException(CCEError): """Ext function call exception""" + pass class HTTPError(CCEError): - """ HTTPError raised when HTTP request returned a error.""" + """HTTPError raised when HTTP request returned a error.""" def __init__(self, reason=None): """ Initialize HTTPError with `response` object and `status`. """ self.reason = reason - super(HTTPError, self).__init__(reason) + super().__init__(reason) class StopCCEIteration(CCEError): """Exception to exit from the engine iteration.""" + pass class CCESplitError(CCEError): """Exception to exit the job in Split Task""" + pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/ext.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/ext.py index cc93735..abe0875 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/ext.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/ext.py @@ -13,34 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from builtins import str -from builtins import range import calendar import json import re import traceback from collections import Iterable from datetime import datetime -import six from jsonpath_ng import parse -from .exceptions import FuncException, StopCCEIteration, QuitJobError + +from ..common import log, util +from .exceptions import FuncException, QuitJobError, StopCCEIteration from .pipemgr import PipeManager -from ..common import util, log _logger = log.get_cc_logger() def regex_search(pattern, source, flags=0): """Search substring in source through regex""" - if not isinstance(source, six.string_types): - _logger.warning('Cannot apply regex search on non-string: %s', type(source)) + if not isinstance(source, str): + _logger.warning("Cannot apply regex search on non-string: %s", type(source)) return {} try: matches = re.search(pattern=pattern, string=source, flags=flags) except Exception: - _logger.warning('Unable to search pattern=%s and flags=%s in string, error=%s', - pattern, flags, traceback.format_exc()) + _logger.warning( + "Unable to search pattern=%s and flags=%s in string, error=%s", + pattern, + flags, + traceback.format_exc(), + ) return {} else: return matches.groupdict() if matches else {} @@ -59,49 +61,36 @@ def regex_match(pattern, source, flags=0): return re.match(pattern, source, flags) is not None except Exception: _logger.warning( - 'Unable to match source with pattern=%s, cause=%s', + "Unable to match source with pattern=%s, cause=%s", pattern, - traceback.format_exc() + traceback.format_exc(), ) return False -def regex_not_match(pattern, source, flags=0): - """ - Determine whether a string is not match a regex pattern. - - :param pattern: regex expression - :param source: candidate to match regex - :param flags: flags for regex match - :return: `True` if candidate not match pattern else `False` - """ - return not regex_match(pattern, source, flags) - - def json_path(source, json_path_expr): - """ Extract value from string with JSONPATH expression. + """Extract value from string with JSONPATH expression. 
:param json_path_expr: JSONPATH expression :param source: string to extract value :return: A `list` contains all values extracted """ if not source: - _logger.debug('source to apply JSONPATH is empty, return empty.') - return '' + _logger.debug("source to apply JSONPATH is empty, return empty.") + return "" - if isinstance(source, six.string_types): + if isinstance(source, str): _logger.debug( - 'source expected is a JSON, not %s. Attempt to' - ' convert it to JSON', - type(source) + "source is expected to be a JSON, not %s. Attempt to" " convert it to JSON", + type(source), ) try: source = json.loads(source) except Exception as ex: _logger.warning( - 'Unable to load JSON from source: %s. ' + "Unable to load JSON from source: %s. " 'Attempt to apply JSONPATH "%s" on source directly.', ex, - json_path_expr + json_path_expr, ) try: @@ -110,31 +99,29 @@ def json_path(source, json_path_expr): _logger.debug( 'Got %s elements extracted with JSONPATH expression "%s"', - len(results), json_path_expr + len(results), + json_path_expr, ) if not results: - return '' + return "" - return results[0] or '' if len(results) == 1 else results + return results[0] or "" if len(results) == 1 else results except Exception as ex: _logger.warning( 'Unable to apply JSONPATH expression "%s" on source,' - ' message=%s cause=%s', + " message=%s cause=%s", json_path_expr, ex, - traceback.format_exc() + traceback.format_exc(), ) - return '' + return "" -def splunk_xml(candidates, - time=None, - index=None, - host=None, - source=None, - sourcetype=None): - """ Wrap a event with splunk xml format. +def splunk_xml( + candidates, time=None, index=None, host=None, source=None, sourcetype=None +): + """Wrap an event with splunk xml format. :param candidates: data used to wrap as event :param time: timestamp which must be empty or a valid float :param index: index name for event @@ -152,8 +139,7 @@ def splunk_xml(candidates, time = float(time) except ValueError: _logger.warning( - '"time" %s is expected to be a float, set "time" to None', - time + '"time" %s is expected to be a float, set "time" to None', time ) time = None xml_events = util.format_events( @@ -162,60 +148,58 @@ def splunk_xml(candidates, index=index, host=host, source=source, - sourcetype=sourcetype + sourcetype=sourcetype, ) - _logger.info( - "[%s] events are formated as splunk stream xml", - len(candidates) - ) + _logger.info("[%s] events are formatted as splunk stream xml", len(candidates)) return xml_events def std_output(candidates): - """ Output a string to stdout. + """Output a string to stdout. :param candidates: List of string to output to stdout or a single string. """ - if isinstance(candidates, six.string_types): + if isinstance(candidates, str): candidates = [candidates] all_str = True for candidate in candidates: - if all_str and not isinstance(candidate, six.string_types): + if all_str and not isinstance(candidate, str): all_str = False _logger.debug( - 'The type of data needs to print is "%s" rather than %s', + 'The type of data to print is "%s" rather than str', type(candidate), - str(six.string_types) ) try: candidate = json.dumps(candidate) except: - _logger.exception('The type of data needs to print is "%s"' - ' rather than %s', - type(candidate), - str(six.string_types)) + _logger.exception( + 'The type of data to print is "%s"' " rather than str", + type(candidate), + ) if not PipeManager().write_events(candidate): - raise FuncException('Fail to output data to stdout.
The event' - ' writer is stopped or encountered exception') + raise FuncException( + "Fail to output data to stdout. The event" + " writer is stopped or encountered exception" + ) - _logger.debug('Writing events to stdout finished.') + _logger.debug("Writing events to stdout finished.") return True def _parse_json(source, json_path_expr=None): if not source: - _logger.debug('Unable to parse JSON from empty source, return empty.') + _logger.debug("Unable to parse JSON from empty source, return empty.") return {} if json_path_expr: _logger.debug( - 'Try to extract JSON from source with JSONPATH expression: %s, ', - json_path_expr + "Try to extract JSON from source with JSONPATH expression: %s, ", + json_path_expr, ) source = json_path(source, json_path_expr) - elif isinstance(source, six.string_types): + elif isinstance(source, str): source = json.loads(source) return source @@ -236,8 +220,9 @@ def json_empty(source, json_path_expr=None): return len(data) == 0 except Exception as ex: _logger.warning( - 'Unable to determine whether source is json_empty, treat it as ' - 'not json_empty: %s', ex + "Unable to determine whether source is json_empty, treat it as " + "not json_empty: %s", + ex, ) return False @@ -258,9 +243,9 @@ def json_not_empty(source, json_path_expr=None): return len(data) > 0 except Exception as ex: _logger.warning( - 'Unable to determine whether source is json_not_empty, ' - 'treat it as not json_not_empty: %s', - ex + "Unable to determine whether source is json_not_empty, " + "treat it as not json_not_empty: %s", + ex, ) return False @@ -282,24 +267,25 @@ def _fix_microsecond_format(fmt, micros): def do_replacement(x, micros): if int(x.group(1)) in range(1, 7) and len(x.group()) % 2: - return x.group().replace('%' + x.group(1) + 'f', - micros[:min(int(x.group(1)), len(micros))]) + return x.group().replace( + "%" + x.group(1) + "f", micros[: min(int(x.group(1)), len(micros))] + ) return x.group() - return re.sub(r'%+([1-6])f', lambda x: do_replacement(x, micros), fmt) + return re.sub(r"%+([1-6])f", lambda x: do_replacement(x, micros), fmt) def _fix_timestamp_format(fmt, timestamp): """Replace '%s' in time format with timestamp if the number - of '%' before 's' is odd.""" + of '%' before 's' is odd.""" return re.sub( - r'%+s', + r"%+s", ( - lambda x: - x.group() if len(x.group()) % 2 else x.group().replace('%s', - timestamp) + lambda x: x.group() + if len(x.group()) % 2 + else x.group().replace("%s", timestamp) ), - fmt + fmt, ) @@ -307,11 +293,11 @@ def time_str2str(date_string, from_format, to_format): """Convert a date string with given format to another format. 
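# A runnable sketch of the escaping rule _fix_timestamp_format applies above
# (values illustrative): '%s' is substituted with the epoch string only when
# the run of '%' before 's' has odd length; an even run means escaped percent
# literals, so the token is left untouched.
import re

def fix_timestamp_format(fmt, timestamp):
    return re.sub(
        r"%+s",
        lambda m: m.group()
        if len(m.group()) % 2
        else m.group().replace("%s", timestamp),
        fmt,
    )

print(fix_timestamp_format("epoch=%s", "1697151373"))     # epoch=1697151373
print(fix_timestamp_format("literal=%%s", "1697151373"))  # literal=%%s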
Return the original date string if it's type is not string or failed to parse or convert it with format.""" - if not isinstance(date_string, six.string_types): + if not isinstance(date_string, str): _logger.warning( '"date_string" must be a string type, found %s,' - ' return the original date_string directly.', - type(date_string) + " return the original date_string directly.", + type(date_string), ) return date_string @@ -329,18 +315,18 @@ def time_str2str(date_string, from_format, to_format): except Exception: _logger.warning( 'Unable to convert date_string "%s" from format "%s" to "%s",' - ' return the original date_string, cause=%s', + " return the original date_string, cause=%s", date_string, from_format, to_format, - traceback.format_exc() + traceback.format_exc(), ) return date_string def is_true(value): """Determine whether value is True""" - return str(value).strip().lower() == 'true' + return str(value).strip().lower() == "true" def exit_if_true(value): @@ -358,9 +344,7 @@ def exit_job_if_true(value): def assert_true(value, message=None): """Assert value is True""" if not is_true(value): - raise AssertionError( - message or '"{value}" is not true'.format(value=value) - ) + raise AssertionError(message or f'"{value}" is not true') def split_by(source, target, separator=None): @@ -368,43 +352,45 @@ def split_by(source, target, separator=None): try: if not source: return [] - elif isinstance(source, six.string_types) and separator: + elif isinstance(source, str) and separator: values = source.split(separator) return [{target: value.strip()} for value in values] - elif isinstance(source, six.string_types): + elif isinstance(source, str): return [{target: source}] elif isinstance(source, Iterable): return [{target: value} for value in source] else: return [{target: source}] except Exception as ex: - _logger.warning("split_by method encountered exception " - "source=%s message=%s cause=%s", source, ex, - traceback.format_exc()) + _logger.warning( + "split_by method encountered exception " "source=%s message=%s cause=%s", + source, + ex, + traceback.format_exc(), + ) return [] _extension_functions = { - 'assert_true': assert_true, - 'exit_if_true': exit_if_true, - 'exit_job_if_true': exit_job_if_true, - 'is_true': is_true, - 'regex_match': regex_match, - 'regex_not_match': regex_not_match, - 'regex_search': regex_search, - 'set_var': set_var, - 'splunk_xml': splunk_xml, - 'std_output': std_output, - 'json_path': json_path, - 'json_empty': json_empty, - 'json_not_empty': json_not_empty, - 'time_str2str': time_str2str, - 'split_by': split_by + "assert_true": assert_true, + "exit_if_true": exit_if_true, + "exit_job_if_true": exit_job_if_true, + "is_true": is_true, + "regex_match": regex_match, + "regex_search": regex_search, + "set_var": set_var, + "splunk_xml": splunk_xml, + "std_output": std_output, + "json_path": json_path, + "json_empty": json_empty, + "json_not_empty": json_not_empty, + "time_str2str": time_str2str, + "split_by": split_by, } def lookup_method(name): - """ Find a predefined function with given function name. + """Find a predefined function with given function name. :param name: function name. :return: A function with given name. 
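# A minimal sketch of the name-to-callable registry that _extension_functions
# and lookup_method implement above; lookup_method is assumed to be a plain
# dict lookup, and 'mask_none' is a hypothetical extension registered the same
# way the built-ins are.
_functions = {
    "is_true": lambda value: str(value).strip().lower() == "true",
}

def lookup(name):
    """Find a predefined function with the given name."""
    return _functions.get(name)

_functions["mask_none"] = lambda v: "" if v is None else v  # add an extension

assert lookup("is_true")("True") is True
assert lookup("mask_none")(None) == ""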
""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/http.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/http.py index 5992d47..e9d4675 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/http.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/http.py @@ -13,74 +13,61 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from builtins import str -from builtins import range -from builtins import object import time import traceback -import six +import requests + import munch +from requests import PreparedRequest, Session, utils +from solnlib.utils import is_true from cloudconnectlib.common import util from cloudconnectlib.common.log import get_cc_logger from cloudconnectlib.core import defaults from cloudconnectlib.core.exceptions import HTTPError -from httplib2 import Http, socks, ProxyInfo -from requests import PreparedRequest, utils -from solnlib.utils import is_true - -try: # Python2 environment support - from httplib2 import SSLHandshakeError -except: # Python3 environment support - from ssl import SSLError as SSLHandshakeError _logger = get_cc_logger() -_PROXY_TYPE_MAP = { - 'http': socks.PROXY_TYPE_HTTP, - 'socks4': socks.PROXY_TYPE_SOCKS4, - 'socks5': socks.PROXY_TYPE_SOCKS5, -} - -class HTTPResponse(object): +class HTTPResponse: """ HTTPResponse class wraps response of HTTP request for later use. """ def __init__(self, response, content): """Construct a HTTPResponse from response and content returned - with httplib2 request""" - self._status_code = response.status + with requests.Session() request""" + self._status_code = response.status_code self._header = response - self._body = self._decode_content(response, content) + self._body = self._decode_content(response.headers, content) @staticmethod def _decode_content(response, content): if not content: - return '' + return "" charset = utils.get_encoding_from_headers(response) if charset is None: charset = defaults.charset _logger.info( - 'Unable to find charset in response headers,' - ' set it to default "%s"', charset + "Unable to find charset in response headers," ' set it to default "%s"', + charset, ) - _logger.info('Decoding response content with charset=%s', charset) + _logger.info("Decoding response content with charset=%s", charset) try: - return content.decode(charset, errors='replace') + return content.decode(charset, errors="replace") except Exception as ex: _logger.warning( - 'Failure decoding response content with charset=%s,' - ' decode it with utf-8: %s', - charset, ex + "Failure decoding response content with charset=%s," + " decode it with utf-8: %s", + charset, + ex, ) - return content.decode('utf-8', errors='replace') + return content.decode("utf-8", errors="replace") @property def header(self): @@ -117,139 +104,160 @@ def prepare_url(url, params=None): return prepare_url -def get_proxy_info(proxy_config): - if not proxy_config or not is_true(proxy_config.get('proxy_enabled')): - _logger.info('Proxy is not enabled') - return None +def get_proxy_info(proxy_config: dict) -> dict: + """ + @proxy_config: dict like object of the format - + { + "proxy_url": my-proxy.server.com, + "proxy_port": 0000, + "proxy_username": username, + "proxy_password": password, + "proxy_type": http or sock5, + "proxy_rdns": 0 or 1, + } + """ + proxy_info = {} + + if not proxy_config or not 
is_true(proxy_config.get("proxy_enabled")): + _logger.info("Proxy is not enabled") + return {} + + proxy_type = proxy_config.get("proxy_type", "").lower() + if proxy_type not in ("http", "socks5"): + proxy_type = "http" + _logger.info('Proxy type not found, set to "HTTP"') + + if is_true(proxy_config.get("proxy_rdns")) and proxy_type == "socks5": + proxy_type = "socks5h" - url = proxy_config.get('proxy_url') - port = proxy_config.get('proxy_port') + url = proxy_config.get("proxy_url") + port = proxy_config.get("proxy_port") if url or port: if not url: raise ValueError('Proxy "url" must not be empty') if not util.is_valid_port(port): - raise ValueError( - 'Proxy "port" must be in range [1,65535]: %s' % port - ) - - user = proxy_config.get('proxy_username') - password = proxy_config.get('proxy_password') + raise ValueError('Proxy "port" must be in range [1,65535]: %s' % port) - if not all((user, password)): - _logger.info('Proxy has no credentials found') - user, password = None, None + proxy_info["http"] = f"{proxy_type}://{url}:{int(port)}" + user = proxy_config.get("proxy_username") + password = proxy_config.get("proxy_password") - proxy_type = proxy_config.get('proxy_type') - proxy_type = proxy_type.lower() if proxy_type else 'http' - - if proxy_type in _PROXY_TYPE_MAP: - ptv = _PROXY_TYPE_MAP[proxy_type] - elif proxy_type in list(_PROXY_TYPE_MAP.values()): - ptv = proxy_type + if all((user, password)): + proxy_info["http"] = f"{proxy_type}://{user}:{password}@{url}:{int(port)}" else: - ptv = socks.PROXY_TYPE_HTTP - _logger.info('Proxy type not found, set to "HTTP"') + _logger.info("Proxy has no credentials found") - rdns = is_true(proxy_config.get('proxy_rdns')) - - proxy_info = ProxyInfo( - proxy_host=url, - proxy_port=int(port), - proxy_type=ptv, - proxy_user=user, - proxy_pass=password, - proxy_rdns=rdns - ) + proxy_info["https"] = proxy_info["http"] return proxy_info + def standardize_proxy_config(proxy_config): """ - This function is used to standardize the proxy information structure to get it evaluated through `get_proxy_info` function + This function is used to standardize the proxy information structure to + get it evaluated through `get_proxy_info` function """ if not isinstance(proxy_config, dict): - raise ValueError("Received unexpected format of proxy configuration. Expected format: object, Actual format: {}".format(type(proxy_config))) + raise ValueError( + "Received unexpected format of proxy configuration. " + "Expected format: object, Actual format: {}".format(type(proxy_config)) + ) standard_proxy_config = { "proxy_enabled": proxy_config.get("enabled", proxy_config.get("proxy_enabled")), - "proxy_username": proxy_config.get("username", proxy_config.get("proxy_username")), - "proxy_password": proxy_config.get("password", proxy_config.get("proxy_password")), + "proxy_username": proxy_config.get( + "username", proxy_config.get("proxy_username") + ), + "proxy_password": proxy_config.get( + "password", proxy_config.get("proxy_password") + ), "proxy_url": proxy_config.get("host", proxy_config.get("proxy_url")), "proxy_type": proxy_config.get("type", proxy_config.get("proxy_type")), "proxy_port": proxy_config.get("port", proxy_config.get("proxy_port")), - "proxy_rdns": proxy_config.get("rdns", proxy_config.get("proxy_rdns")) + "proxy_rdns": proxy_config.get("rdns", proxy_config.get("proxy_rdns")), } return standard_proxy_config -class HttpClient(object): - def __init__(self, proxy_info=None): - """Constructs a `HTTPRequest` with a optional proxy setting. 
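# Sketch of the requests-style mapping the rewritten get_proxy_info above
# produces: one proxy URL shared by both schemes, with 'socks5h' chosen when
# remote DNS resolution (proxy_rdns) is requested. Host, port and credentials
# below are placeholders.
proxy_config = {
    "proxy_enabled": "1",
    "proxy_type": "socks5",
    "proxy_rdns": "1",  # resolve hostnames on the proxy -> socks5h scheme
    "proxy_url": "my-proxy.server.com",
    "proxy_port": 1080,
    "proxy_username": "user",
    "proxy_password": "secret",
}
# Expected result shape, consumable as requests.get(url, proxies=proxies):
proxies = {
    "http": "socks5h://user:secret@my-proxy.server.com:1080",
    "https": "socks5h://user:secret@my-proxy.server.com:1080",
}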
+class HttpClient: + def __init__(self, proxy_info=None, verify=True): + """ + Constructs an `HttpClient` with an optional proxy setting. + :param proxy_info: a dictionary of proxy details. It could directly match the input signature + of `requests` library, otherwise it will be standardized and converted to match the input signature. + :param verify: same as the `verify` parameter of requests.request() method """ self._connection = None - + self.requests_verify = verify + if proxy_info: if isinstance(proxy_info, munch.Munch): proxy_info = dict(proxy_info) - # Updating the proxy_info object to make it compatible for getting evaluated through `get_proxy_info` function - proxy_info = standardize_proxy_config(proxy_info) - self._proxy_info = get_proxy_info(proxy_info) + if all((len(proxy_info) == 2, "http" in proxy_info, "https" in proxy_info)): + # when `proxy_info` already matches the input signature of `requests` library's proxy dict + self._proxy_info = proxy_info + else: + # Updating the proxy_info object to make it compatible for getting evaluated + # through `get_proxy_info` function + proxy_info = standardize_proxy_config(proxy_info) + self._proxy_info = get_proxy_info(proxy_info) else: self._proxy_info = proxy_info self._url_preparer = PreparedRequest() - def _send_internal(self, uri, method, headers=None, body=None, proxy_info=None): - """Do send request to target URL and validate SSL cert by default. - If validation failed, disable it and try again.""" - try: - return self._connection.request( - uri, body=body, method=method, headers=headers - ) - except SSLHandshakeError: - _logger.warning( - "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verification failed. " - "The certificate of the https server [%s] is not trusted, " - "this add-on will proceed to connect with this certificate. " - "You may need to check the certificate and " - "refer to the documentation and add it to the trust list. %s", - uri, - traceback.format_exc() - ) - - self._connection = self._build_http_connection( - proxy_info=proxy_info, - disable_ssl_cert_validation=True - ) - return self._connection.request( - uri, body=body, method=method, headers=headers - ) + def _send_internal(self, uri, method, headers=None, body=None): + """Do send request to target URL, validate SSL cert by default and return the response.""" + return self._connection.request( + url=uri, + data=body, + method=method, + headers=headers, + timeout=defaults.timeout, + verify=self.requests_verify, + ) - def _retry_send_request_if_needed(self, uri, method='GET', headers=None, body=None): + def _retry_send_request_if_needed(self, uri, method="GET", headers=None, body=None): """Invokes request and auto retry with an exponential backoff if the response status is configured in defaults.retry_statuses.""" retries = max(defaults.retries, 0) - _logger.info('Invoking request to [%s] using [%s] method', uri, method) + _logger.info("Invoking request to [%s] using [%s] method", uri, method) for i in range(retries + 1): try: - response, content = self._send_internal( + resp = self._send_internal( uri=uri, body=body, method=method, headers=headers ) + content = resp.content + response = resp + except requests.exceptions.SSLError as err: + _logger.error( + "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verification failed. " + "The certificate of the https server [%s] is not trusted, " + "You may need to check the certificate and " + "refer to the documentation and add it to the trust list.
%s", + uri, + traceback.format_exc(), + ) + raise HTTPError(f"HTTP Error {err}") from err except Exception as err: _logger.exception( - 'Could not send request url=%s method=%s', uri, method) - raise HTTPError('HTTP Error %s' % str(err)) + "Could not send request url=%s method=%s", uri, method + ) + raise HTTPError(f"HTTP Error {err}") from err - status = response.status + status = resp.status_code if self._is_need_retry(status, i, retries): - delay = 2 ** i + delay = 2**i _logger.warning( - 'The response status=%s of request which url=%s and' - ' method=%s. Retry after %s seconds.', - status, uri, method, delay, + "The response status=%s of request which url=%s and" + " method=%s. Retry after %s seconds.", + status, + uri, + method, + delay, ) time.sleep(delay) continue @@ -262,16 +270,16 @@ def _prepare_url(self, url, params=None): def _initialize_connection(self): if self._proxy_info: - _logger.info('Proxy is enabled for http connection.') + _logger.info("Proxy is enabled for http connection.") else: - _logger.info('Proxy is not enabled for http connection.') + _logger.info("Proxy is not enabled for http connection.") self._connection = self._build_http_connection(self._proxy_info) def send(self, request): if not request: - raise ValueError('The request is none') - if request.body and not isinstance(request.body, six.string_types): - raise TypeError('Invalid request body type: {}'.format(request.body)) + raise ValueError("The request is none") + if request.body and not isinstance(request.body, str): + raise TypeError(f"Invalid request body type: {request.body}") if self._connection is None: self._initialize_connection() @@ -280,8 +288,7 @@ def send(self, request): url = self._prepare_url(request.url) except Exception: _logger.warning( - 'Failed to encode url=%s: %s', - request.url, traceback.format_exc() + "Failed to encode url=%s: %s", request.url, traceback.format_exc() ) url = request.url @@ -291,15 +298,18 @@ def send(self, request): @staticmethod def _build_http_connection( - proxy_info=None, - timeout=defaults.timeout, - disable_ssl_cert_validation=defaults.disable_ssl_cert_validation): - return Http( - proxy_info=proxy_info, - timeout=timeout, - disable_ssl_certificate_validation=disable_ssl_cert_validation) + proxy_info=None, + disable_ssl_cert_validation=defaults.disable_ssl_cert_validation, + ): + """ + Creates a `request.Session()` object, sets the verify + and proxy_info parameter and returns this object + """ + s = Session() + s.verify = not disable_ssl_cert_validation + s.proxies = proxy_info or {} + return s @staticmethod def _is_need_retry(status, retried, maximum_retries): - return retried < maximum_retries \ - and status in defaults.retry_statuses + return retried < maximum_retries and status in defaults.retry_statuses diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/job.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/job.py old mode 100755 new mode 100644 index 497c9f6..090eacd --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/job.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/job.py @@ -13,18 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -from builtins import object import threading +from ..common import log from .exceptions import QuitJobError from .task import BaseTask -from ..common import log - logger = log.get_cc_logger() -class CCEJob(object): +class CCEJob: """ One CCEJob is composed of a list of tasks. The task could be HTTP task or Split task(currently supported task types). @@ -62,15 +60,17 @@ def set_proxy(self, proxy_setting): :type proxy_setting: ``dict`` """ self._proxy_info = proxy_setting - logger.debug("CCEJob proxy info: proxy_enabled='%s', proxy_url='%s', " - "proxy_port='%s', proxy_rdns='%s', proxy_type='%s', " - "proxy_username='%s'", - proxy_setting.get("proxy_enabled"), - proxy_setting.get("proxy_url"), - proxy_setting.get("proxy_port"), - proxy_setting.get("proxy_rdns"), - proxy_setting.get("proxy_type"), - proxy_setting.get("proxy_username")) + logger.debug( + "CCEJob proxy info: proxy_enabled='%s', proxy_url='%s', " + "proxy_port='%s', proxy_rdns='%s', proxy_type='%s', " + "proxy_username='%s'", + proxy_setting.get("proxy_enabled"), + proxy_setting.get("proxy_url"), + proxy_setting.get("proxy_port"), + proxy_setting.get("proxy_rdns"), + proxy_setting.get("proxy_type"), + proxy_setting.get("proxy_username"), + ) def add_task(self, task): """ @@ -80,14 +80,14 @@ def add_task(self, task): :type task: TBD """ if not isinstance(task, BaseTask): - raise ValueError('Unsupported task type: {}'.format(type(task))) + raise ValueError(f"Unsupported task type: {type(task)}") if callable(getattr(task, "set_proxy", None)) and self._proxy_info: task.set_proxy(self._proxy_info) self._rest_tasks.append(task) def _check_if_stop_needed(self): if self._stop_signal_received: - logger.info('Stop job signal received, stopping job.') + logger.info("Stop job signal received, stopping job.") self._stopped.set() return True return False @@ -96,10 +96,10 @@ def run(self): """ Run current job, which executes tasks in it sequentially. """ - logger.debug('Start to run job') + logger.debug("Start to run job") if not self._rest_tasks: - logger.info('No task found in job') + logger.info("No task found in job") return if self._check_if_stop_needed(): @@ -111,29 +111,35 @@ def run(self): try: contexts = list(self._running_task.perform(self._context) or ()) except QuitJobError: - logger.info('Quit job signal received, exiting job') + logger.info("Quit job signal received, exiting job") return if self._check_if_stop_needed(): return if not self._rest_tasks: - logger.info('No more task need to perform, exiting job') + logger.info("No more task need to perform, exiting job") return - jobs = [CCEJob(context=ctx, tasks=self._rest_tasks) for ctx in contexts] + count = 0 + + for ctx in contexts: + count += 1 + yield CCEJob(context=ctx, tasks=self._rest_tasks) + + if self._check_if_stop_needed(): + break - logger.debug('Generated %s job in total', len(jobs)) - logger.debug('Job execution finished successfully.') + logger.debug("Generated %s job in total", count) + logger.debug("Job execution finished successfully.") self._stopped.set() - return jobs def stop(self, block=False, timeout=30): """ Stop current job. 
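# Simplified sketch of the generator contract run() now follows above:
# instead of building the full child-job list up front, one job is yielded
# per split context so the engine can schedule children as they appear.
# MiniJob is a stand-in, not the real CCEJob class.
class MiniJob:
    def __init__(self, context, tasks):
        self.context, self.tasks = context, tasks

    def run(self):
        for ctx in ({"page": n} for n in range(3)):  # pretend a split task ran
            yield MiniJob(context=ctx, tasks=self.tasks)

children = list(MiniJob(context={}, tasks=[]).run())
print(len(children), "child job(s) generated")  # 3 child job(s) generated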
""" if self._stopped.is_set(): - logger.info('Job is not running, cannot stop it.') + logger.info("Job is not running, cannot stop it.") return self._stop_signal_received = True @@ -143,12 +149,12 @@ def stop(self, block=False, timeout=30): return if not self._stopped.wait(timeout): - logger.info('Waiting for stop job timeout') + logger.info("Waiting for stop job timeout") def __str__(self): if self._running_task: - return 'Job(running task={})'.format(self._running_task) - return 'Job(no running task)' + return f"Job(running task={self._running_task})" + return "Job(no running task)" def __repr__(self): return self.__str__() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/models.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/models.py index 90854c8..603e1d0 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/models.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/models.py @@ -13,22 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from builtins import str -from builtins import object import base64 import json import sys import traceback -import six +from ..common.log import get_cc_logger from .ext import lookup_method from .template import compile_template -from ..common.log import get_cc_logger _logger = get_cc_logger() -class _Token(object): +class _Token: """Token class wraps a template expression""" def __init__(self, source): @@ -36,8 +33,7 @@ def __init__(self, source): will be created if source is string type because Jinja template must be a string.""" self._source = source - self._value_for = compile_template(source) \ - if isinstance(source, six.string_types) else None + self._value_for = compile_template(source) if isinstance(source, str) else None def render(self, variables): """Render value with variables if source is a string. @@ -49,31 +45,30 @@ def render(self, variables): except Exception as ex: _logger.warning( 'Unable to render template "%s". Please make sure template is' - ' a valid Jinja2 template and token is exist in variables. ' - 'message=%s cause=%s', + " a valid Jinja2 template and token is exist in variables. 
" + "message=%s cause=%s", self._source, ex, - traceback.format_exc() + traceback.format_exc(), ) return self._source -class DictToken(object): +class DictToken: """DictToken wraps a dict which value is template expression""" def __init__(self, template_expr): - self._tokens = {k: _Token(v) - for k, v in (template_expr or {}).items()} + self._tokens = {k: _Token(v) for k, v in (template_expr or {}).items()} def render(self, variables): return {k: v.render(variables) for k, v in self._tokens.items()} -class BaseAuth(object): +class BaseAuth: """A base class for all authorization classes""" def __call__(self, headers, context): - raise NotImplementedError('Auth must be callable.') + raise NotImplementedError("Auth must be callable.") class BasicAuthorization(BaseAuth): @@ -81,19 +76,19 @@ class BasicAuthorization(BaseAuth): def __init__(self, options): if not options: - raise ValueError('Options for basic auth unexpected to be empty') + raise ValueError("Options for basic auth unexpected to be empty") - username = options.get('username') + username = options.get("username") if not username: - raise ValueError('Username is mandatory for basic auth') - password = options.get('password') + raise ValueError("Username is mandatory for basic auth") + password = options.get("password") if not password: - raise ValueError('Password is mandatory for basic auth') + raise ValueError("Password is mandatory for basic auth") self._username = _Token(username) self._password = _Token(password) - def to_native_string(self, string, encoding='ascii'): + def to_native_string(self, string, encoding="ascii"): """ According to rfc7230: Historically, HTTP has allowed field content with text in the @@ -104,8 +99,8 @@ def to_native_string(self, string, encoding='ascii'): US-ASCII octets. A recipient SHOULD treat other octets in field content (obs-text) as opaque data. 
""" - is_py2 = (sys.version_info[0] == 2) - if isinstance(string, six.text_type): + is_py2 = sys.version_info[0] == 2 + if isinstance(string, str): out = string else: if is_py2: @@ -118,12 +113,15 @@ def to_native_string(self, string, encoding='ascii'): def __call__(self, headers, context): username = self._username.render(context) password = self._password.render(context) - headers['Authorization'] = 'Basic %s' % self.to_native_string( - base64.b64encode((username + ':' + password).encode('latin1')) - ).strip() + headers["Authorization"] = ( + "Basic %s" + % self.to_native_string( + base64.b64encode((username + ":" + password).encode("latin1")) + ).strip() + ) -class RequestParams(object): +class RequestParams: def __init__(self, url, method, header=None, auth=None, body=None): self._header = DictToken(header) self._url = _Token(url) @@ -156,7 +154,7 @@ def render(self, ctx): url=self._url.render(ctx), method=self._method, headers=self.normalize_headers(ctx), - body=self.body.render(ctx) + body=self.body.render(ctx), ) def normalize_url(self, context): @@ -174,19 +172,19 @@ def normalize_body(self, context): return self.body.render(context) -class Request(object): +class Request: def __init__(self, method, url, headers, body): self.method = method self.url = url self.headers = headers if not body: body = None - elif not isinstance(body, six.string_types): + elif not isinstance(body, str): body = json.dumps(body) self.body = body -class _Function(object): +class _Function: def __init__(self, inputs, function): self._inputs = tuple(_Token(expr) for expr in inputs or []) self._function = function @@ -211,7 +209,7 @@ class Task(_Function): """Task class wraps a task in processor pipeline""" def __init__(self, inputs, function, output=None): - super(Task, self).__init__(inputs, function) + super().__init__(inputs, function) self._output = output @property @@ -219,14 +217,16 @@ def output(self): return self._output def execute(self, context): - """Execute task with arguments which rendered from context """ + """Execute task with arguments which rendered from context""" args = [arg for arg in self.inputs_values(context)] caller = lookup_method(self.function) output = self._output _logger.info( - 'Executing task method: [%s], input size: [%s], output: [%s]', - self.function, len(args), output + "Executing task method: [%s], input size: [%s], output: [%s]", + self.function, + len(args), + output, ) if output is None: @@ -249,8 +249,9 @@ def calculate(self, context): callable_method = lookup_method(self.function) _logger.debug( - 'Calculating condition with method: [%s], input size: [%s]', - self.function, len(args) + "Calculating condition with method: [%s], input size: [%s]", + self.function, + len(args), ) result = callable_method(*args) @@ -260,7 +261,7 @@ def calculate(self, context): return result -class _Conditional(object): +class _Conditional: """A base class for all conditional action""" def __init__(self, conditions): @@ -275,16 +276,14 @@ def passed(self, context): :param context: variables to render template :return: `True` if all passed else `False` """ - return any( - condition.calculate(context) for condition in self._conditions - ) + return any(condition.calculate(context) for condition in self._conditions) class Processor(_Conditional): """Processor class contains a conditional data process pipeline""" def __init__(self, skip_conditions, pipeline): - super(Processor, self).__init__(skip_conditions) + super().__init__(skip_conditions) self._pipeline = pipeline or [] @property @@ 
-298,7 +297,7 @@ def should_skipped(self, context): class IterationMode(_Conditional): def __init__(self, iteration_count, conditions): - super(IterationMode, self).__init__(conditions) + super().__init__(conditions) self._iteration_count = iteration_count @property @@ -310,14 +309,14 @@ def conditions(self): return self._conditions -class Checkpoint(object): +class Checkpoint: """A checkpoint includes a namespace to determine the checkpoint location and a content defined the format of content stored in checkpoint.""" def __init__(self, namespace, content): - """Constructs checkpoint with given namespace and content template. """ + """Constructs checkpoint with given namespace and content template.""" if not content: - raise ValueError('Checkpoint content must not be empty') + raise ValueError("Checkpoint content must not be empty") self._namespace = tuple(_Token(expr) for expr in namespace or ()) self._content = DictToken(content) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/pipemgr.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/pipemgr.py index d56615a..e62ac54 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/pipemgr.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/pipemgr.py @@ -13,18 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from __future__ import print_function -from builtins import object from solnlib.pattern import Singleton -from future.utils import with_metaclass -class PipeManager(with_metaclass(Singleton, object)): +class PipeManager(metaclass=Singleton): def __init__(self, event_writer=None): self._event_writer = event_writer def write_events(self, events): if not self._event_writer: - print(events) + print(events, flush=True) return True return self._event_writer.write_events(events) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/plugin.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/plugin.py old mode 100755 new mode 100644 index d92ca2e..c40451f --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/plugin.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/plugin.py @@ -13,22 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from __future__ import absolute_import -from builtins import next -from .ext import _extension_functions +import importlib +import sys +import traceback from os import path as op from os import walk -import sys + from ..common import log -import traceback -import importlib +from .ext import _extension_functions logger = log.get_cc_logger() def cce_pipeline_plugin(func): """ - Decorator for pipepline plugin functions. + Decorator for pipeline plugin functions. This docorator helps to register user defined pipeline function into CCE engine so that it could be looked up when executing jobs. 
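# Hypothetical plugin module (e.g. cce_plugin_demo.py, matching the
# cce_plugin_*.py naming the engine scans for) showing how the decorator
# above registers a user function; assumes cloudconnectlib is importable.
from cloudconnectlib.core.plugin import cce_pipeline_plugin

@cce_pipeline_plugin
def mask_secret(value):
    """Keep only the last four characters of a secret visible."""
    return "*" * max(len(value) - 4, 0) + value[-4:]

# Once init_pipeline_plugins() imports this module, lookup_method("mask_secret")
# resolves to the registered wrapper and can be called from task pipelines.
print(mask_secret("abcd1234efgh"))  # ********efgh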
@@ -42,19 +41,23 @@ def cce_pipeline_plugin(func): >>> do_work() """ if not callable(func): - logger.debug("Function %s is not callable, don't add it as a pipeline" - " function", func.__name__) + logger.debug( + "Function %s is not callable, don't add it as a pipeline function", + func.__name__, + ) else: if func.__name__ in list(_extension_functions.keys()): - logger.warning("Pipeline function %s already exists, please rename" - "it!", func.__name__) + logger.warning( + "Pipeline function %s already exists, please rename it!", + func.__name__, + ) else: _extension_functions[func.__name__] = func - logger.debug("Added function %s to pipeline plugin system", - func.__name__) + logger.debug("Added function %s to pipeline plugin system", func.__name__) def pipeline_func(*args, **kwargs): return func(*args, **kwargs) + return pipeline_func @@ -68,21 +71,24 @@ def import_plugin_file(file_name): if file_name.endswith(".py"): module_name = file_name[:-3] else: - logger.warning("Plugin file %s is with unsupported extenstion, the " - "supported are py", file_name) + logger.warning( + "Plugin file %s is with unsupported extension, the supported are py", + file_name, + ) return if module_name in list(sys.modules.keys()): - logger.warning("Module %s aleady exists and it won't be reload, " - "please rename your plugin module if it is required.", - module_name) + logger.debug( + "Module %s already exists and it won't be reload, " + "please rename your plugin module if it is required.", + module_name, + ) return try: importlib.import_module(module_name) except Exception: - logger.warning("Failed to load module {}, {}".format( - module_name, traceback.format_exc())) + logger.warning(f"Failed to load module {module_name}, {traceback.format_exc()}") return logger.info("Module %s is imported", module_name) @@ -98,8 +104,9 @@ def init_pipeline_plugins(plugin_dir): with ".py" """ if not op.isdir(plugin_dir): - logger.warning("%s is not a directory! Pipeline plugin files won't be loaded.", - plugin_dir) + logger.warning( + "%s is not a directory! Pipeline plugin files won't be loaded.", plugin_dir + ) return if plugin_dir not in sys.path: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/task.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/task.py old mode 100755 new mode 100644 index e134261..e8246eb --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/task.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/task.py @@ -13,31 +13,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -from builtins import object import copy import threading from abc import abstractmethod -import six -import re from cloudconnectlib.common.log import get_cc_logger from cloudconnectlib.core import defaults from cloudconnectlib.core.checkpoint import CheckpointManagerAdapter -from cloudconnectlib.core.exceptions import HTTPError -from cloudconnectlib.core.exceptions import StopCCEIteration, CCESplitError, QuitJobError -from cloudconnectlib.core.ext import lookup_method, regex_search -from cloudconnectlib.core.http import get_proxy_info, HttpClient -from cloudconnectlib.core.models import DictToken, _Token, BasicAuthorization, Request +from cloudconnectlib.core.exceptions import ( + CCESplitError, + HTTPError, + QuitJobError, + StopCCEIteration, +) +from cloudconnectlib.core.ext import lookup_method +from cloudconnectlib.core.http import HttpClient, get_proxy_info +from cloudconnectlib.core.models import BasicAuthorization, DictToken, Request, _Token logger = get_cc_logger() -_RESPONSE_KEY = '__response__' -_AUTH_TYPES = { - 'basic_auth': BasicAuthorization -} +_RESPONSE_KEY = "__response__" +_AUTH_TYPES = {"basic_auth": BasicAuthorization} -class ProcessHandler(object): +class ProcessHandler: def __init__(self, method, arguments, output): self.method = method self.arguments = [_Token(arg) for arg in arguments or ()] @@ -45,7 +44,7 @@ def __init__(self, method, arguments, output): def execute(self, context): args = [arg.render(context) for arg in self.arguments] - logger.debug('%s arguments found for method %s', len(args), self.method) + logger.debug("%s arguments found for method %s", len(args), self.method) callable_method = lookup_method(self.method) result = callable_method(*args) @@ -56,7 +55,7 @@ def execute(self, context): return data -class Condition(object): +class Condition: def __init__(self, method, arguments): self.method = method self.arguments = [_Token(arg) for arg in arguments or ()] @@ -64,11 +63,11 @@ def __init__(self, method, arguments): def is_meet(self, context): args = [arg.render(context) for arg in self.arguments] callable_method = lookup_method(self.method) - logger.debug('%s arguments found for method %s', len(args), self.method) + logger.debug("%s arguments found for method %s", len(args), self.method) return callable_method(*args) -class ConditionGroup(object): +class ConditionGroup: def __init__(self): self._conditions = [] @@ -76,12 +75,10 @@ def add(self, condition): self._conditions.append(condition) def is_meet(self, context): - return any( - cdn.is_meet(context) for cdn in self._conditions - ) + return any(cdn.is_meet(context) for cdn in self._conditions) -class ProxyTemplate(object): +class ProxyTemplate: def __init__(self, proxy_setting): self._proxy = DictToken(proxy_setting or {}) @@ -90,31 +87,31 @@ def render(self, context): return get_proxy_info(rendered) -class RequestTemplate(object): +class RequestTemplate: def __init__(self, request): if not request: - raise ValueError('The request is none') - url = request.get('url') + raise ValueError("The request is none") + url = request.get("url") if not url: raise ValueError("The request doesn't contain a url or it's empty") self.url = _Token(url) - self.nextpage_url = _Token(request.get('nextpage_url', url)) - self.headers = DictToken(request.get('headers', {})) + self.nextpage_url = _Token(request.get("nextpage_url", url)) + self.headers = DictToken(request.get("headers", {})) # Request body could be string or dict - body = request.get('body') + body = request.get("body") if isinstance(body, dict): 
self.body = DictToken(body) - elif isinstance(body, six.string_types): + elif isinstance(body, str): self.body = _Token(body) else: if body: - logger.warning('Invalid request body: %s', body) + logger.warning("Invalid request body: %s", body) self.body = None - method = request.get('method', 'GET') - if not method or method.upper() not in ('GET', 'POST'): - raise ValueError('Unsupported value for request method: {}'.format(method)) + method = request.get("method", "GET") + if not method or method.upper() not in ("GET", "POST"): + raise ValueError(f"Unsupported value for request method: {method}") self.method = _Token(method) self.count = 0 @@ -133,11 +130,11 @@ def render(self, context): url=url, method=self.method.render(context), headers=self.headers.render(context), - body=self.body.render(context) if self.body else None + body=self.body.render(context) if self.body else None, ) -class BaseTask(object): +class BaseTask: def __init__(self, name): self._name = name self._pre_process_handler = [] @@ -158,6 +155,7 @@ def add_preprocess_handler(self, method, input, output=None): """ handler = ProcessHandler(method, input, output) self._pre_process_handler.append(handler) + def add_preprocess_handler_batch(self, handlers): """ Add multiple preprocess handlers. All handlers will be maintained and @@ -205,6 +203,7 @@ def add_postprocess_handler_batch(self, handlers): """ for method, args, output in handlers: self.add_postprocess_handler(method, args, output) + def add_postprocess_skip_condition(self, method, input): """ Add a preprocess skip condition. The skip_conditions for postprocess @@ -222,10 +221,10 @@ def add_postprocess_skip_condition(self, method, input): @staticmethod def _execute_handlers(skip_conditions, handlers, context, phase): if skip_conditions.is_meet(context): - logger.debug('%s process skip conditions are met', phase.capitalize()) + logger.debug("%s process skip conditions are met", phase.capitalize()) return if not handlers: - logger.debug('No handler found in %s process', phase) + logger.debug("No handler found in %s process", phase) return for handler in handlers: @@ -233,19 +232,23 @@ def _execute_handlers(skip_conditions, handlers, context, phase): if data: # FIXME context.update(data) - logger.debug('Execute handlers finished successfully.') + if context.get("is_token_refreshed"): + # In case of OAuth flow after refreshing access token retrying again with the query to collect records + logger.info( + "The access token is refreshed hence skipping the rest post process handler tasks. Retrying again." 
+ ) + return + logger.debug("Execute handlers finished successfully.") def _pre_process(self, context): - self._execute_handlers(self._skip_pre_conditions, - self._pre_process_handler, - context, - 'pre') + self._execute_handlers( + self._skip_pre_conditions, self._pre_process_handler, context, "pre" + ) def _post_process(self, context): - self._execute_handlers(self._skip_post_conditions, - self._post_process_handler, - context, - 'post') + self._execute_handlers( + self._skip_post_conditions, self._post_process_handler, context, "post" + ) @abstractmethod def perform(self, context): @@ -265,43 +268,43 @@ class CCESplitTask(BaseTask): OUTPUT_KEY = "__cce_split_result__" def __init__(self, name): - super(CCESplitTask, self).__init__(name) + super().__init__(name) self._process_handler = None self._source = None def configure_split(self, method, source, output, separator=None): arguments = [source, output, separator] self._source = source - self._process_handler = ProcessHandler(method, arguments, - CCESplitTask.OUTPUT_KEY) + self._process_handler = ProcessHandler( + method, arguments, CCESplitTask.OUTPUT_KEY + ) def perform(self, context): - logger.debug('Task=%s start to run', self) + logger.debug("Task=%s start to run", self) try: self._pre_process(context) except StopCCEIteration: - logger.info('Task=%s exits in pre_process stage', self) + logger.info("Task=%s exits in pre_process stage", self) yield context return if not self._process_handler: - logger.info('Task=%s has no split method', self) + logger.info("Task=%s has no split method", self) raise CCESplitError try: invoke_results = self._process_handler.execute(context) - except: + except Exception: logger.exception("Task=%s encountered exception", self) raise CCESplitError - if not invoke_results or not \ - invoke_results.get(CCESplitTask.OUTPUT_KEY): + if not invoke_results or not invoke_results.get(CCESplitTask.OUTPUT_KEY): raise CCESplitError for invoke_result in invoke_results[CCESplitTask.OUTPUT_KEY]: new_context = copy.deepcopy(context) new_context.update(invoke_result) yield new_context - logger.debug('Task=%s finished', self) + logger.debug("Task=%s finished", self) class CCEHTTPRequestTask(BaseTask): @@ -313,8 +316,20 @@ class CCEHTTPRequestTask(BaseTask): from context when executing. """ - def __init__(self, request, name, meta_config=None, task_config=None): - super(CCEHTTPRequestTask, self).__init__(name) + def __init__(self, request, name, meta_config=None, task_config=None, **kwargs): + """ + :param verify: Absolute path to server certificate, otherwise uses + requests' default certificate to verify server's TLS certificate. + Explicitly set it to False to not verify TLS certificate. + :type verify: ``string or bool`` + :param custom_func: Custom error code handling for HTTP codes: + the function should accept `request`, `response` and `logger` parameters + To let the library handle the status code, return a non-list object + To handle status code using custom logic, return (response, bool). 
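# Sketch of the custom_func hook described above: it receives (request,
# response, logger); returning a (response, need_exit) tuple overrides the
# default status handling, while any non-list/tuple return falls through to
# it. Treating 404 as "no more data" is an example policy, not library
# behavior.
def handle_status(request, response, logger):
    if response.status_code == 404:
        logger.info("Got 404 from %s, stopping iteration", request.url)
        return None, True  # (response, need_exit)
    return "fall-through"  # non-tuple -> default handling

# task = CCEHTTPRequestTask(request, "my_task", custom_func=handle_status)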
+ Bool decides whether to break or continue the code flow + :type custom_func: ``function`` + """ + super().__init__(name) self._request = RequestTemplate(request) self._stop_conditions = ConditionGroup() self._proxy_info = None @@ -324,16 +339,20 @@ def __init__(self, request, name, meta_config=None, task_config=None): self._task_config = task_config self._meta_config = meta_config + self._http_client = None self._authorizer = None self._stopped = threading.Event() self._stop_signal_received = False + if kwargs.get("custom_func"): + self.custom_handle_status_code = kwargs["custom_func"] + self.requests_verify = kwargs.get("verify", True) def stop(self, block=False, timeout=30): """ Stop current task. """ if self._stopped.is_set(): - logger.info('Task=%s is not running, cannot stop it.', self) + logger.info("Task=%s is not running, cannot stop it.", self) return self._stop_signal_received = True @@ -341,11 +360,11 @@ def stop(self, block=False, timeout=30): return if not self._stopped.wait(timeout): - logger.info('Waiting for stop task %s timeout', self) + logger.info("Waiting for stop task %s timeout", self) def _check_if_stop_needed(self): if self._stop_signal_received: - logger.info('Stop task signal received, stopping task %s.', self) + logger.info("Stop task signal received, stopping task %s.", self) self._stopped.set() return True return False @@ -378,10 +397,10 @@ def set_auth(self, auth_type, settings): :type settings: ``dict`` """ if not auth_type: - raise ValueError('Invalid auth type={}'.format(auth_type)) + raise ValueError(f"Invalid auth type={auth_type}") authorizer_cls = _AUTH_TYPES.get(auth_type.lower()) if not authorizer_cls: - raise ValueError('Unsupported auth type={}'.format(auth_type)) + raise ValueError(f"Unsupported auth type={auth_type}") self._authorizer = authorizer_cls(settings) def set_iteration_count(self, count): @@ -398,8 +417,10 @@ def set_iteration_count(self, count): except ValueError: self._max_iteration_count = defaults.max_iteration_count logger.warning( - 'Invalid iteration count: %s, using default max iteration count: %s', - count, self._max_iteration_count) + "Invalid iteration count: %s, using default max iteration count: %s", + count, + self._max_iteration_count, + ) def add_stop_condition(self, method, input): """ @@ -423,53 +444,61 @@ def configure_checkpoint(self, name, content): :type content: ``dict`` """ if not name or not name.strip(): - raise ValueError('Invalid checkpoint name: "{}"'.format(name)) + raise ValueError(f'Invalid checkpoint name: "{name}"') if not content: - raise ValueError('Invalid checkpoint content: {}'.format(content)) + raise ValueError(f"Invalid checkpoint content: {content}") self._checkpointer = CheckpointManagerAdapter( namespaces=name, content=content, meta_config=self._meta_config, - task_config=self._task_config + task_config=self._task_config, ) def _should_exit(self, done_count, context): if 0 < self._max_iteration_count <= done_count: - logger.info('Iteration count reached %s', self._max_iteration_count) + logger.info("Iteration count reached %s", self._max_iteration_count) return True if self._stop_conditions.is_meet(context): - logger.info('Stop conditions are met') + logger.info("Stop conditions are met") return True return False - @staticmethod - def _send_request(client, request): + def _send_request(self, request): try: - response = client.send(request) + response = self._http_client.send(request) except HTTPError as error: logger.exception( - 'Error occurred in request url=%s method=%s reason=%s', - 
request.url, request.method, error.reason + "Error occurred in request url=%s method=%s reason=%s", + request.url, + request.method, + error.reason, ) return None, True status = response.status_code if status in defaults.success_statuses: - if not (response.body or '').strip(): + if not (response.body or "").strip(): logger.info( - 'The response body of request which url=%s and' - ' method=%s is empty, status=%s.', - request.url, request.method, status + "The response body of request which url=%s and" + " method=%s is empty, status=%s.", + request.url, + request.method, + status, ) return None, True return response, False - error_log = ('The response status=%s for request which url=%s and' - ' method=%s.') % ( - status, request.url, request.method - ) + if "custom_handle_status_code" in dir(self): + returned_items = self.custom_handle_status_code(request, response, logger) + if isinstance(returned_items, (list, tuple)): + return returned_items[0], returned_items[1] + + error_log = ( + "The response status=%s for request which url=%s and" + " method=%s and message=%s " + ) % (status, request.url, request.method, response.body) if status in defaults.warning_statuses: logger.warning(error_log) @@ -480,33 +509,38 @@ def _send_request(client, request): def _persist_checkpoint(self, context): if not self._checkpointer: - logger.debug('Checkpoint is not configured. Skip persisting checkpoint.') + logger.debug("Checkpoint is not configured. Skip persisting checkpoint.") return try: self._checkpointer.save(context) except Exception: - logger.exception('Error while persisting checkpoint') + logger.exception("Error while persisting checkpoint") else: - logger.debug('Checkpoint has been updated successfully.') + logger.debug("Checkpoint has been updated successfully.") def _load_checkpoint(self, ctx): if not self._checkpointer: - logger.debug('Checkpoint is not configured. Skip loading checkpoint.') + logger.debug("Checkpoint is not configured. 
Skip loading checkpoint.") return {} return self._checkpointer.load(ctx=ctx) def _prepare_http_client(self, ctx): proxy = self._proxy_info.render(ctx) if self._proxy_info else None - return HttpClient(proxy) + self._http_client = HttpClient(proxy, self.requests_verify) + + def _flush_checkpoint(self): + if self._checkpointer: + # Flush checkpoint cache to disk + self._checkpointer.close() def perform(self, context): - logger.info('Starting to perform task=%s', self) + logger.info("Starting to perform task=%s", self) - client = self._prepare_http_client(context) + self._prepare_http_client(context) done_count = 0 context.update(self._load_checkpoint(context)) - update_source = False if context.get('source') else True + update_source = False if context.get("source") else True self._request.reset() while True: @@ -526,17 +560,19 @@ def perform(self, context): if self._authorizer: self._authorizer(r.headers, context) - response, need_exit = self._send_request(client, r) + response, need_exit = self._send_request(r) context[_RESPONSE_KEY] = response if need_exit: - logger.info('Task=%s need been terminated due to request response', self) + logger.info( + "Task=%s need been terminated due to request response", self + ) break if self._check_if_stop_needed(): break if update_source: - context['source'] = r.url.split('?')[0] + context["source"] = r.url.split("?")[0] try: self._post_process(context) @@ -555,12 +591,10 @@ def perform(self, context): done_count += 1 if self._should_exit(done_count, context): break - if update_source and context.get('source'): - del context['source'] + if update_source and context.get("source"): + del context["source"] yield context self._stopped.set() - if self._checkpointer: - # Flush checkpoint cache to disk - self._checkpointer.close() - logger.info('Perform task=%s finished', self) + self._flush_checkpoint() + logger.info("Perform task=%s finished", self) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/template.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/template.py index f4e16fc..fe8c1d4 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/template.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/core/template.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
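# Condensed sketch of the lifecycle perform() implements above: restore the
# checkpoint into the context, loop request -> post-process -> persist until
# a stop condition fires, then flush the checkpoint store. All callables are
# stand-ins for the task's collaborators.
def run_task(load_cp, send, persist_cp, flush_cp, should_exit, context):
    context.update(load_cp(context))
    done = 0
    while True:
        response = send(context)
        if response is None:  # empty or error response ends the task
            break
        context["__response__"] = response
        persist_cp(context)
        done += 1
        if should_exit(done, context):
            break
    flush_cp()
    return context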
# -from jinja2 import Template import re +from jinja2 import Template + # This pattern matches the template with only one token inside like "{{ # token1}}", "{{ token2 }" PATTERN = re.compile(r"^\{\{\s*(\w+)\s*\}\}$") @@ -29,7 +30,7 @@ def translate_internal(context): match = re.match(PATTERN, _origin_template) if match: context_var = context.get(match.groups()[0]) - return context_var if context_var else '' + return context_var if context_var else "" return _template.render(context) return translate_internal diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py index 77f3ab6..3af567a 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py @@ -13,21 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from future import standard_library -standard_library.install_aliases() import configparser import os.path as op +from ..common.lib_util import get_app_root_dir, get_main_file, get_mod_input_script_name from .data_collection import ta_mod_input as ta_input from .ta_cloud_connect_client import TACloudConnectClient as CollectorCls -from ..common.lib_util import ( - get_main_file, get_app_root_dir, get_mod_input_script_name -) def _load_options_from_inputs_spec(app_root, stanza_name): - input_spec_file = 'inputs.conf.spec' - file_path = op.join(app_root, 'README', input_spec_file) + input_spec_file = "inputs.conf.spec" + file_path = op.join(app_root, "README", input_spec_file) if not op.isfile(file_path): raise RuntimeError("README/%s doesn't exist" % input_spec_file) @@ -35,7 +31,7 @@ def _load_options_from_inputs_spec(app_root, stanza_name): parser = configparser.RawConfigParser(allow_no_value=True) parser.read(file_path) options = list(parser.defaults().keys()) - stanza_prefix = '%s://' % stanza_name + stanza_prefix = "%s://" % stanza_name stanza_exist = False for section in parser.sections(): @@ -49,21 +45,24 @@ def _load_options_from_inputs_spec(app_root, stanza_name): def _find_ucc_global_config_json(app_root, ucc_config_filename): """Find UCC config file from all possible directories""" - candidates = ['local', 'default', 'bin', - op.join('appserver', 'static', 'js', 'build')] + candidates = [ + "local", + "default", + "bin", + op.join("appserver", "static", "js", "build"), + ] for candidate in candidates: file_path = op.join(app_root, candidate, ucc_config_filename) if op.isfile(file_path): return file_path raise RuntimeError( - 'Unable to load %s from [%s]' - % (ucc_config_filename, ','.join(candidates)) + "Unable to load {} from [{}]".format(ucc_config_filename, ",".join(candidates)) ) def _get_cloud_connect_config_json(script_name): - config_file_name = '.'.join([script_name, 'cc.json']) + config_file_name = ".".join([script_name, "cc.json"]) return op.join(op.dirname(get_main_file()), config_file_name) @@ -73,9 +72,7 @@ def run(single_instance=False): cce_config_file = _get_cloud_connect_config_json(script_name) app_root = get_app_root_dir() - ucc_config_path = _find_ucc_global_config_json( - app_root, 'globalConfig.json' - ) + ucc_config_path = _find_ucc_global_config_json(app_root, 
"globalConfig.json") schema_params = _load_options_from_inputs_spec(app_root, script_name) ta_input.main( @@ -84,5 +81,5 @@ def run(single_instance=False): log_suffix=script_name, cc_json_file=cce_config_file, schema_para_list=schema_params, - single_instance=single_instance + single_instance=single_instance, ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py index 966cdf2..4c58c45 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/__init__.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import json import hashlib +import json def load_schema_file(schema_file): @@ -64,4 +64,3 @@ class UCCException(Exception): """ pass - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py index 446e1ac..826e5f2 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/log.py @@ -14,8 +14,8 @@ # limitations under the License. # import logging -from splunktalib.common import log as stclog -import six + +from solnlib import log def set_log_level(log_level): @@ -23,33 +23,39 @@ def set_log_level(log_level): Set log level. """ - if isinstance(log_level, six.string_types): + if isinstance(log_level, str): if log_level.upper() == "DEBUG": - stclog.Logs().set_level(logging.DEBUG) + log.Logs().set_level(logging.DEBUG) elif log_level.upper() == "INFO": - stclog.Logs().set_level(logging.INFO) + log.Logs().set_level(logging.INFO) elif log_level.upper() == "WARN": - stclog.Logs().set_level(logging.WARN) + log.Logs().set_level(logging.WARN) elif log_level.upper() == "ERROR": - stclog.Logs().set_level(logging.ERROR) + log.Logs().set_level(logging.ERROR) elif log_level.upper() == "WARNING": - stclog.Logs().set_level(logging.WARNING) + log.Logs().set_level(logging.WARNING) elif log_level.upper() == "CRITICAL": - stclog.Logs().set_level(logging.CRITICAL) + log.Logs().set_level(logging.CRITICAL) else: - stclog.Logs().set_level(logging.INFO) + log.Logs().set_level(logging.INFO) elif isinstance(log_level, int): - if log_level in [logging.DEBUG, logging.INFO, logging.ERROR, - logging.WARN, logging.WARNING, logging.CRITICAL]: - stclog.Logs().set_level(log_level) + if log_level in [ + logging.DEBUG, + logging.INFO, + logging.ERROR, + logging.WARN, + logging.WARNING, + logging.CRITICAL, + ]: + log.Logs().set_level(log_level) else: - stclog.Logs().set_level(logging.INFO) + log.Logs().set_level(logging.INFO) else: - stclog.Logs().set_level(logging.INFO) + log.Logs().set_level(logging.INFO) # Global logger -logger = stclog.Logs().get_logger("cloud_connect_engine") +logger = log.Logs().get_logger("cloud_connect_engine") def reset_logger(name): @@ -57,9 +63,5 @@ def reset_logger(name): Reset logger. 
""" - stclog.reset_logger(name) - global logger - logger = stclog.Logs().get_logger(name) - - + logger = log.Logs().get_logger(name) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py index 1825619..e91ba85 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/rwlock.py @@ -17,11 +17,10 @@ This module provides Read-Write lock. """ -from builtins import object import threading -class _ReadLocker(object): +class _ReadLocker: def __init__(self, lock): self.lock = lock @@ -33,7 +32,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): return False -class _WriteLocker(object): +class _WriteLocker: def __init__(self, lock): self.lock = lock @@ -45,8 +44,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): return False -class RWLock(object): - """ Simple Read-Write lock. +class RWLock: + """Simple Read-Write lock. Allow multiple read but only one writing concurrently. """ @@ -84,4 +83,3 @@ def reader_lock(self): @property def writer_lock(self): return _WriteLocker(self) - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py index 7e98191..1a7f1fa 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/common/schema_meta.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # -FIELD_PRODUCT = '_product' -FIELD_REST_NAMESPACE = '_rest_namespace' -FIELD_REST_PREFIX = '_rest_prefix' -FIELD_PROTOCOL_VERSION = '_protocol_version' -FIELD_VERSION = '_version' -FIELD_ENCRYPTION_FORMATTER = '_encryption_formatter' +FIELD_PRODUCT = "_product" +FIELD_REST_NAMESPACE = "_rest_namespace" +FIELD_REST_PREFIX = "_rest_prefix" +FIELD_PROTOCOL_VERSION = "_protocol_version" +FIELD_VERSION = "_version" +FIELD_ENCRYPTION_FORMATTER = "_encryption_formatter" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/config.py index 5546a55..a88c75c 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/config.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/config.py @@ -17,26 +17,17 @@ This is for load/save configuration in UCC server or TA. The load/save action is based on specified schema. 
""" - -from __future__ import absolute_import - -from future import standard_library -standard_library.install_aliases() -from builtins import str -from builtins import range -from builtins import object import json import logging -import traceback import time -import six +import traceback +from urllib.parse import quote -from splunktalib.rest import splunkd_request, code_to_msg -from splunktalib.common import util as sc_util +from solnlib import utils +from splunktalib.rest import code_to_msg, splunkd_request -from .common import log as stulog from .common import UCCException -from urllib.parse import quote +from .common import log as stulog LOGGING_STOPPED = False @@ -51,7 +42,7 @@ def stop_logging(): LOGGING_STOPPED = True -def log(msg, msgx='', level=logging.INFO, need_tb=False): +def log(msg, msgx="", level=logging.INFO, need_tb=False): """ Logging in UCC Config Module. :param msg: message content @@ -64,42 +55,45 @@ def log(msg, msgx='', level=logging.INFO, need_tb=False): if LOGGING_STOPPED: return - msgx = ' - ' + msgx if msgx else '' - content = 'UCC Config Module: %s%s' % (msg, msgx) + msgx = " - " + msgx if msgx else "" + content = f"UCC Config Module: {msg}{msgx}" if need_tb: - stack = ''.join(traceback.format_stack()) - content = '%s\r\n%s' % (content, stack) + stack = "".join(traceback.format_stack()) + content = f"{content}\r\n{stack}" stulog.logger.log(level, content, exc_info=1) class ConfigException(UCCException): - """Exception for UCC Config Exception - """ + """Exception for UCC Config Exception""" + pass -class Config(object): - """UCC Config Module - """ +class Config: + """UCC Config Module""" # Placeholder stands for any field - FIELD_PLACEHOLDER = '*' + FIELD_PLACEHOLDER = "*" # Head of non-processing endpoint - NON_PROC_ENDPOINT = '#' + NON_PROC_ENDPOINT = "#" # Some meta fields in UCC Config schema - META_FIELDS = ('_product', '_rest_namespace', '_rest_prefix', - '_protocol_version', '_version', - '_encryption_formatter') + META_FIELDS = ( + "_product", + "_rest_namespace", + "_rest_prefix", + "_protocol_version", + "_version", + "_encryption_formatter", + ) # Default Values for Meta fields META_FIELDS_DEFAULT = { - '_encryption_formatter': '', + "_encryption_formatter": "", } - def __init__(self, splunkd_uri, session_key, schema, - user='nobody', app='-'): + def __init__(self, splunkd_uri, session_key, schema, user="nobody", app="-"): """ :param splunkd_uri: the root uri of Splunk server, like https://127.0.0.1:8089 @@ -109,7 +103,7 @@ def __init__(self, splunkd_uri, session_key, schema, :param app: namespace of the resources requested :return: """ - self.splunkd_uri = splunkd_uri.strip('/') + self.splunkd_uri = splunkd_uri.strip("/") self.session_key = session_key self.user, self.app = user, app self._parse_schema(schema) @@ -122,27 +116,27 @@ def load(self): """ log('"load" method in', level=logging.DEBUG) - ret = {meta_field: getattr(self, meta_field) - for meta_field in Config.META_FIELDS} + ret = { + meta_field: getattr(self, meta_field) for meta_field in Config.META_FIELDS + } for ep_id, ep in self._endpoints.items(): - data = {'output_mode': 'json', '--cred--': '1'} + data = {"output_mode": "json", "--cred--": "1"} retries = 4 waiting_time = [1, 2, 2] for retry in range(retries): - resp, cont = splunkd_request( + response = splunkd_request( splunkd_uri=self.make_uri(ep_id), session_key=self.session_key, data=data, - retry=3 + retry=3, ) - - if resp is None or resp.status != 200: - msg = 'Fail to load endpoint "{ep_id}" - {err}' \ - ''.format(ep_id=ep_id, 
- err=code_to_msg(resp, cont) - if resp else cont) + cont = response.text + if response is None or response.status_code != 200: + msg = 'Fail to load endpoint "{ep_id}" - {err}' "".format( + ep_id=ep_id, err=code_to_msg(response) + ) log(msg, level=logging.ERROR, need_tb=True) raise ConfigException(msg) @@ -150,7 +144,7 @@ def load(self): ret[ep_id] = self._parse_content(ep_id, cont) except ConfigException as exc: log(exc, level=logging.WARNING, need_tb=True) - if retry < retries-1: + if retry < retries - 1: time.sleep(waiting_time[retry]) else: break @@ -161,8 +155,9 @@ def load(self): log('"load" method out', level=logging.DEBUG) return ret - def update_items(self, endpoint_id, item_names, field_names, data, - raise_if_failed=False): + def update_items( + self, endpoint_id, item_names, field_names, data, raise_if_failed=False + ): """Update items in specified endpoint with given fields in data :param endpoint_id: endpoint id in schema, the key name in schema :param item_names: a list of item name @@ -183,40 +178,49 @@ def update_items(self, endpoint_id, item_names, field_names, data, If raise_if_failed is True, it will exist with an exception on any updating failed. """ - log('"update_items" method in', - msgx='endpoint_id=%s, item_names=%s, field_names=%s' - % (endpoint_id, item_names, field_names), - level=logging.DEBUG) - - assert endpoint_id in self._endpoints, \ - 'Unexpected endpoint id in given schema - {ep_id}' \ - ''.format(ep_id=endpoint_id) + log( + '"update_items" method in', + msgx="endpoint_id=%s, item_names=%s, field_names=%s" + % (endpoint_id, item_names, field_names), + level=logging.DEBUG, + ) + + assert ( + endpoint_id in self._endpoints + ), "Unexpected endpoint id in given schema - {ep_id}" "".format( + ep_id=endpoint_id + ) item_names_failed = [] for item_name in item_names: item_data = data.get(item_name, {}) - item_data = {field_name: self.dump_value(endpoint_id, - item_name, - field_name, - item_data[field_name]) - for field_name in field_names - if field_name in item_data} + item_data = { + field_name: self.dump_value( + endpoint_id, item_name, field_name, item_data[field_name] + ) + for field_name in field_names + if field_name in item_data + } if not item_data: continue item_uri = self.make_uri(endpoint_id, item_name=item_name) - resp, cont = splunkd_request(splunkd_uri=item_uri, - session_key=self.session_key, - data=item_data, - method="POST", - retry=3 - ) - if resp is None or resp.status not in (200, 201): - msg = 'Fail to update item "{item}" in endpoint "{ep_id}"' \ - ' - {err}'.format(ep_id=endpoint_id, - item=item_name, - err=code_to_msg(resp, cont) - if resp else cont) + response = splunkd_request( + splunkd_uri=item_uri, + session_key=self.session_key, + data=item_data, + method="POST", + retry=3, + ) + if response is None or response.status_code not in (200, 201): + msg = ( + 'Fail to update item "{item}" in endpoint "{ep_id}"' + " - {err}".format( + ep_id=endpoint_id, + item=item_name, + err=code_to_msg(response), + ) + ) log(msg, level=logging.ERROR) if raise_if_failed: raise ConfigException(msg) @@ -231,28 +235,39 @@ def make_uri(self, endpoint_id, item_name=None): :param item_name: item name for given endpoint. 
None for listing all :return: """ - endpoint = self._endpoints[endpoint_id]['endpoint'] - ep_full = endpoint[1:].strip('/') \ - if endpoint.startswith(Config.NON_PROC_ENDPOINT) else \ - '{admin_match}/{protocol_version}/{endpoint}' \ - ''.format(admin_match=self._rest_namespace, - protocol_version=self._protocol_version, - endpoint=(self._rest_prefix + - self._endpoints[endpoint_id]['endpoint'])) - ep_uri = None if endpoint_id not in self._endpoints else \ - '{splunkd_uri}/servicesNS/{user}/{app}/{endpoint_full}' \ - ''.format(splunkd_uri=self.splunkd_uri, - user=self.user, - app=self.app, - endpoint_full=ep_full - ) - - url = ep_uri if item_name is None else "{ep_uri}/{item_name}"\ - .format(ep_uri=ep_uri, item_name=quote(item_name)) + endpoint = self._endpoints[endpoint_id]["endpoint"] + ep_full = ( + endpoint[1:].strip("/") + if endpoint.startswith(Config.NON_PROC_ENDPOINT) + else "{admin_match}/{protocol_version}/{endpoint}" + "".format( + admin_match=self._rest_namespace, + protocol_version=self._protocol_version, + endpoint=(self._rest_prefix + self._endpoints[endpoint_id]["endpoint"]), + ) + ) + ep_uri = ( + None + if endpoint_id not in self._endpoints + else "{splunkd_uri}/servicesNS/{user}/{app}/{endpoint_full}" + "".format( + splunkd_uri=self.splunkd_uri, + user=self.user, + app=self.app, + endpoint_full=ep_full, + ) + ) + + url = ( + ep_uri + if item_name is None + else "{ep_uri}/{item_name}".format( + ep_uri=ep_uri, item_name=quote(item_name) + ) + ) if item_name is None: - url += '?count=-1' - log('"make_uri" method', msgx='url=%s' % url, - level=logging.DEBUG) + url += "?count=-1" + log('"make_uri" method', msgx="url=%s" % url, level=logging.DEBUG) return url def _parse_content(self, endpoint_id, content): @@ -260,49 +275,59 @@ def _parse_content(self, endpoint_id, content): :param content: a JSON string returned from REST. 
""" try: - content = json.loads(content)['entry'] - ret = {ent['name']: ent['content'] for ent in content} + content = json.loads(content)["entry"] + ret = {ent["name"]: ent["content"] for ent in content} except Exception as exc: - msg = 'Fail to parse content from endpoint_id=%s' \ - ' - %s' % (endpoint_id, exc) + msg = "Fail to parse content from endpoint_id=%s" " - %s" % ( + endpoint_id, + exc, + ) log(msg, level=logging.ERROR, need_tb=True) raise ConfigException(msg) - ret = {name: {key: self.load_value(endpoint_id, name, key, val) - for key, val in ent.items() - if not key.startswith('eai:')} - for name, ent in ret.items()} + ret = { + name: { + key: self.load_value(endpoint_id, name, key, val) + for key, val in ent.items() + if not key.startswith("eai:") + } + for name, ent in ret.items() + } return ret def _parse_schema(self, ucc_config_schema): try: ucc_config_schema = json.loads(ucc_config_schema) except ValueError: - msg = 'Invalid JSON content of schema' + msg = "Invalid JSON content of schema" log(msg, level=logging.ERROR, need_tb=True) raise ConfigException(msg) except Exception as exc: log(exc, level=logging.ERROR, need_tb=True) raise ConfigException(exc) - ucc_config_schema.update({key: val for key, val in - Config.META_FIELDS_DEFAULT.items() - if key not in ucc_config_schema}) + ucc_config_schema.update( + { + key: val + for key, val in Config.META_FIELDS_DEFAULT.items() + if key not in ucc_config_schema + } + ) for field in Config.META_FIELDS: - assert field in ucc_config_schema and \ - isinstance(ucc_config_schema[field], six.string_types), \ - 'Missing or invalid field "%s" in given schema' % field + assert field in ucc_config_schema and isinstance( + ucc_config_schema[field], str + ), ('Missing or invalid field "%s" in given schema' % field) setattr(self, field, ucc_config_schema[field]) self._endpoints = {} for key, val in ucc_config_schema.items(): - if key.startswith('_'): + if key.startswith("_"): continue - assert isinstance(val, dict), \ + assert isinstance(val, dict), ( 'The schema of endpoint "%s" should be dict' % key - assert 'endpoint' in val, \ - 'The endpoint "%s" has no endpoint entry' % key + ) + assert "endpoint" in val, 'The endpoint "%s" has no endpoint entry' % key self._endpoints[key] = val @@ -313,58 +338,68 @@ def _check_protocol_version(self): """ if not self._protocol_version: return - if not self._protocol_version.startswith('1.'): - raise ConfigException('Unsupported protocol version "%s" ' - 'in given schema' % self._protocol_version) + if not self._protocol_version.startswith("1."): + raise ConfigException( + 'Unsupported protocol version "%s" ' + "in given schema" % self._protocol_version + ) def load_value(self, endpoint_id, item_name, fname, fval): field_type = self._get_field_type(endpoint_id, item_name, fname) - if field_type == '': + if field_type == "": return fval try: field_type = field_type.lower() - if field_type == 'bool': - return True if sc_util.is_true(fval) else False - elif field_type == 'int': + if field_type == "bool": + return True if utils.is_true(fval) else False + elif field_type == "int": return int(fval) - elif field_type == 'json': + elif field_type == "json": return json.loads(fval) except Exception as exc: - msg = 'Fail to load value of "{type_name}" - ' \ - 'endpoint={endpoint}, item={item}, field={field}' \ - ''.format(type_name=field_type, - endpoint=endpoint_id, - item=item_name, - field=fname) + msg = ( + 'Fail to load value of "{type_name}" - ' + "endpoint={endpoint}, item={item}, field={field}" + "".format( 
+ type_name=field_type, + endpoint=endpoint_id, + item=item_name, + field=fname, + ) + ) log(msg, msgx=str(exc), level=logging.WARNING, need_tb=True) raise ConfigException(msg) def dump_value(self, endpoint_id, item_name, fname, fval): field_type = self._get_field_type(endpoint_id, item_name, fname) - if field_type == '': + if field_type == "": return fval try: field_type = field_type.lower() - if field_type == 'bool': + if field_type == "bool": return str(fval).lower() - elif field_type == 'json': + elif field_type == "json": return json.dumps(fval) else: return fval except Exception as exc: - msg = 'Fail to dump value of "{type_name}" - ' \ - 'endpoint={endpoint}, item={item}, field={field}' \ - ''.format(type_name=field_type, - endpoint=endpoint_id, - item=item_name, - field=fname) + msg = ( + 'Fail to dump value of "{type_name}" - ' + "endpoint={endpoint}, item={item}, field={field}" + "".format( + type_name=field_type, + endpoint=endpoint_id, + item=item_name, + field=fname, + ) + ) log(msg, msgx=str(exc), level=logging.ERROR, need_tb=True) raise ConfigException(msg) def _get_field_type(self, endpoint_id, item_name, fname): - field_types = self._endpoints[endpoint_id].get('field_types', {}) + field_types = self._endpoints[endpoint_id].get("field_types", {}) if item_name in field_types: fields = field_types[item_name] elif Config.FIELD_PLACEHOLDER in field_types: @@ -372,14 +407,18 @@ def _get_field_type(self, endpoint_id, item_name, fname): else: fields = {} - field_type = fields.get(fname, '') - if field_type not in ('', 'bool', 'int', 'json'): - msg = 'Unsupported type "{type_name}" for value in schema - ' \ - 'endpoint={endpoint}, item={item}, field={field}' \ - ''.format(type_name=field_type, - endpoint=endpoint_id, - item=item_name, - field=fname) + field_type = fields.get(fname, "") + if field_type not in ("", "bool", "int", "json"): + msg = ( + 'Unsupported type "{type_name}" for value in schema - ' + "endpoint={endpoint}, item={item}, field={field}" + "".format( + type_name=field_type, + endpoint=endpoint_id, + item=item_name, + field=fname, + ) + ) log(msg, level=logging.ERROR, need_tb=True) raise ConfigException(msg) return field_type diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py index d40e936..e1dc0ca 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/__init__.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
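(Illustrative sketch, not part of the upstream patch: load_value/dump_value above implement a typed round-trip keyed on the schema's field_types, where only "", "bool", "int" and "json" are legal. A self-contained approximation, with _is_true standing in for solnlib.utils.is_true:)

import json

def _is_true(v):
    # Stand-in for solnlib.utils.is_true
    return str(v).strip().lower() in ("1", "true", "t", "y", "yes")

def dump_value(field_type, fval):
    if field_type == "bool":
        return str(fval).lower()     # True -> "true"
    if field_type == "json":
        return json.dumps(fval)
    return fval                      # "" and "int" are stored as-is

def load_value(field_type, fval):
    if field_type == "bool":
        return _is_true(fval)
    if field_type == "int":
        return int(fval)
    if field_type == "json":
        return json.loads(fval)
    return fval

for ftype, value in (("bool", True), ("int", 42), ("json", {"a": 1})):
    assert load_value(ftype, dump_value(ftype, value)) == value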
# -__version__ = "1.0.2" \ No newline at end of file +__version__ = "1.0.2" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py index 9c004e7..4e41db9 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_checkpoint_manager.py @@ -13,19 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from builtins import object import json -import logging import re +from solnlib.utils import is_true +from splunktalib import state_store as ss + +from ..common import log as stulog from . import ta_consts as c from . import ta_helper as th -from ..common import log as stulog -from splunktalib import state_store as ss -from splunktalib.common.util import is_true -class TACheckPointMgr(object): +class TACheckPointMgr: SEPARATOR = "_" * 3 # FIXME We'd better move all default values together @@ -37,52 +36,48 @@ def __init__(self, meta_config, task_config): self._store = self._create_state_store( meta_config, task_config.get(c.checkpoint_storage_type), - task_config[c.appname] + task_config[c.appname], ) def _create_state_store(self, meta_config, storage_type, app_name): - stulog.logger.debug('Got checkpoint storage type=%s', storage_type) + stulog.logger.debug("Got checkpoint storage type=%s", storage_type) if storage_type == c.checkpoint_kv_storage: collection_name = self._get_collection_name() stulog.logger.debug( - 'Creating KV state store, collection name=%s', collection_name + "Creating KV state store, collection name=%s", collection_name ) return ss.get_state_store( meta_config, appname=app_name, collection_name=collection_name, - use_kv_store=True + use_kv_store=True, ) use_cache_file = self._use_cache_file() - max_cache_seconds = \ - self._get_max_cache_seconds() if use_cache_file else None + max_cache_seconds = self._get_max_cache_seconds() if use_cache_file else None stulog.logger.debug( - 'Creating file state store, use_cache_file=%s, max_cache_seconds=%s', - use_cache_file, max_cache_seconds + "Creating file state store, use_cache_file=%s, max_cache_seconds=%s", + use_cache_file, + max_cache_seconds, ) return ss.get_state_store( - meta_config, - app_name, - use_cached_store=use_cache_file, - max_cache_seconds=max_cache_seconds + meta_config, app_name, use_cached_store=use_cache_file ) def _get_collection_name(self): collection = self._task_config.get(c.collection_name) - collection = collection.strip() if collection else '' + collection = collection.strip() if collection else "" if not collection: input_name = self._task_config[c.mod_input_name] stulog.logger.info( - 'Collection name="%s" is empty, set it to "%s"', - collection, input_name + 'Collection name="%s" is empty, set it to "%s"', collection, input_name ) collection = input_name - return re.sub(r'[^\w]+', '_', collection) + return re.sub(r"[^\w]+", "_", collection) def _use_cache_file(self): # TODO Move the default value outside code @@ -90,22 +85,21 @@ def _use_cache_file(self): if use_cache_file: stulog.logger.info( "Stanza=%s using cached file store to create checkpoint", - self._task_config[c.stanza_name] + 
self._task_config[c.stanza_name], ) return use_cache_file def _get_max_cache_seconds(self): default = self._DEFAULT_MAX_CACHE_SECONDS - seconds = self._task_config.get( - c.max_cache_seconds, default - ) + seconds = self._task_config.get(c.max_cache_seconds, default) try: seconds = int(seconds) except ValueError: stulog.logger.warning( "The max_cache_seconds '%s' is not a valid integer," " so set this variable to default value %s", - seconds, default + seconds, + default, ) seconds = default else: @@ -116,7 +110,9 @@ def _get_max_cache_seconds(self): stulog.logger.warning( "The max_cache_seconds (%s) is expected in range[1,%s]," " set it to %s", - seconds, maximum, adjusted + seconds, + maximum, + adjusted, ) seconds = adjusted return seconds @@ -127,27 +123,31 @@ def get_ckpt_key(self, namespaces=None): def get_ckpt(self, namespaces=None, show_namespaces=False): key, namespaces = self.get_ckpt_key(namespaces) raw_checkpoint = self._store.get_state(key) - stulog.logger.info("Get checkpoint key='%s' value='%s'", - key, json.dumps(raw_checkpoint)) + stulog.logger.debug( + "Get checkpoint key='%s' value='%s'", key, json.dumps(raw_checkpoint) + ) if not show_namespaces and raw_checkpoint: return raw_checkpoint.get("data") return raw_checkpoint def delete_if_exists(self, namespaces=None): """Return true if exist and deleted else False""" - key, namespaces = self._get_ckpt_key(namespaces) - if self._store.exists(key): + key, _ = self._key_formatter(namespaces) + try: self._store.delete_state(key) return True - return False + except Exception: + return False + def update_ckpt(self, ckpt, namespaces=None): if not ckpt: stulog.logger.warning("Checkpoint expect to be not empty.") return key, namespaces = self.get_ckpt_key(namespaces) value = {"namespaces": namespaces, "data": ckpt} - stulog.logger.info("Update checkpoint key='%s' value='%s'", - key, json.dumps(value)) + stulog.logger.info( + "Update checkpoint key='%s' value='%s'", key, json.dumps(value) + ) self._store.update_state(key, value) def remove_ckpt(self, namespaces=None): @@ -156,8 +156,11 @@ def remove_ckpt(self, namespaces=None): def _key_formatter(self, namespaces=None): if not namespaces: - stulog.logger.info('Namespaces is empty, using stanza name instead.') - namespaces = [self._task_config[c.stanza_name]] + stanza = self._task_config[c.stanza_name] + stulog.logger.info( + f"Namespaces is empty, using stanza name {stanza} instead." + ) + namespaces = [stanza] key_str = TACheckPointMgr.SEPARATOR.join(namespaces) hashed_file = th.format_name_for_file(key_str) stulog.logger.info("raw_file='%s' hashed_file='%s'", key_str, hashed_file) @@ -166,6 +169,6 @@ def _key_formatter(self, namespaces=None): def close(self, key=None): try: self._store.close(key) - stulog.logger.info('Closed state store successfully. key=%s', key) + stulog.logger.info("Closed state store successfully. key=%s", key) except Exception: - stulog.logger.exception('Error closing state store. key=%s', key) + stulog.logger.exception("Error closing state store. 
key=%s", key) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py index 6af27b5..4083b30 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_config.py @@ -13,27 +13,32 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from __future__ import absolute_import -from builtins import object import os.path as op import socket -from . import ta_consts as c -from . import ta_helper as th -from ..common import log as stulog +from solnlib import server_info from splunktalib import modinput as modinput -from splunktalib import splunk_cluster as sc from splunktalib.common import util +from ..common import log as stulog +from . import ta_consts as c +from . import ta_helper as th + # methods can be overrided by subclass : process_task_configs -class TaConfig(object): +class TaConfig: _current_hostname = socket.gethostname() _appname = util.get_appname_from_path(op.abspath(__file__)) - def __init__(self, meta_config, client_schema, log_suffix=None, - stanza_name=None, input_type=None, - single_instance=True): + def __init__( + self, + meta_config, + client_schema, + log_suffix=None, + stanza_name=None, + input_type=None, + single_instance=True, + ): self._meta_config = meta_config self._stanza_name = stanza_name self._input_type = input_type @@ -41,8 +46,9 @@ def __init__(self, meta_config, client_schema, log_suffix=None, self._single_instance = single_instance self._task_configs = [] self._client_schema = client_schema - self._server_info = sc.ServerInfo(meta_config[c.server_uri], - meta_config[c.session_key]) + self._server_info = server_info.ServerInfo.from_server_uri( + meta_config[c.server_uri], meta_config[c.session_key] + ) self._all_conf_contents = {} self._get_division_settings = {} self.set_logging() @@ -65,14 +71,18 @@ def get_task_configs(self): def get_all_conf_contents(self): if self._all_conf_contents: - return self._all_conf_contents.get(c.inputs), \ - self._all_conf_contents.get(c.all_configs), \ - self._all_conf_contents.get(c.global_settings) + return ( + self._all_conf_contents.get(c.inputs), + self._all_conf_contents.get(c.all_configs), + self._all_conf_contents.get(c.global_settings), + ) inputs, configs, global_settings = th.get_all_conf_contents( self._meta_config[c.server_uri], self._meta_config[c.session_key], - self._client_schema, self._input_type) + self._client_schema, + self._input_type, + ) self._all_conf_contents[c.inputs] = inputs self._all_conf_contents[c.all_configs] = configs self._all_conf_contents[c.global_settings] = global_settings @@ -81,8 +91,9 @@ def get_all_conf_contents(self): def set_logging(self): # The default logger name is "cloud_connect_engine" if self._stanza_name and self._log_suffix: - logger_name = self._log_suffix + "_" + th.format_name_for_file( - self._stanza_name) + logger_name = ( + self._log_suffix + "_" + th.format_name_for_file(self._stanza_name) + ) stulog.reset_logger(logger_name) inputs, configs, global_settings = self.get_all_conf_contents() log_level = "INFO" @@ -91,8 +102,8 @@ def set_logging(self): log_level = item["loglevel"] break 
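        # (Illustrative note, not part of the upstream patch: the scan above
        # looks for the "logging" item among the global settings; e.g. a
        # settings list containing {"name": "logging", "loglevel": "DEBUG"}
        # presumably yields log_level == "DEBUG", while no match keeps the
        # "INFO" default set before the loop.)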
stulog.set_log_level(log_level) - stulog.logger.info("Set log_level={}".format(log_level)) - stulog.logger.info("Start {} task".format(self._stanza_name)) + stulog.logger.info(f"Set log_level={log_level}") + stulog.logger.info(f"Start {self._stanza_name} task") def get_input_type(self): return self._input_type @@ -107,7 +118,8 @@ def _get_checkpoint_storage_type(self, config): if cs_type not in (c.checkpoint_auto, c.checkpoint_file): stulog.logger.warning( "Checkpoint storage type='%s' is invalid, change it to '%s'", - cs_type, c.checkpoint_auto + cs_type, + c.checkpoint_auto, ) cs_type = c.checkpoint_auto @@ -116,7 +128,7 @@ def _get_checkpoint_storage_type(self, config): "Checkpoint storage type is '%s' and instance is " "search head, set checkpoint storage type to '%s'.", c.checkpoint_auto, - c.checkpoint_kv_storage + c.checkpoint_kv_storage, ) cs_type = c.checkpoint_kv_storage return cs_type @@ -126,15 +138,15 @@ def _load_task_configs(self): if self._input_type: inputs = inputs.get(self._input_type) if not self._single_instance: - inputs = [input for input in inputs if - input[c.name] == self._stanza_name] + inputs = [input for input in inputs if input[c.name] == self._stanza_name] all_task_configs = [] for input in inputs: task_config = {} task_config.update(input) task_config[c.configs] = configs - task_config[c.settings] = \ - {item[c.name]: item for item in global_settings["settings"]} + task_config[c.settings] = { + item[c.name]: item for item in global_settings["settings"] + } if self.is_single_instance(): collection_interval = "collection_interval" task_config[c.interval] = task_config.get(collection_interval) @@ -142,11 +154,12 @@ def _load_task_configs(self): if task_config[c.interval] <= 0: raise ValueError( "The interval value {} is invalid." 
- " It should be a positive integer".format( - task_config[c.interval])) + " It should be a positive integer".format(task_config[c.interval]) + ) - task_config[c.checkpoint_storage_type] = \ - self._get_checkpoint_storage_type(task_config) + task_config[c.checkpoint_storage_type] = self._get_checkpoint_storage_type( + task_config + ) task_config[c.appname] = TaConfig._appname task_config[c.mod_input_name] = self._input_type @@ -161,12 +174,19 @@ def process_task_configs(self, task_configs): pass -def create_ta_config(settings, config_cls=TaConfig, log_suffix=None, - single_instance=True): +def create_ta_config( + settings, config_cls=TaConfig, log_suffix=None, single_instance=True +): meta_config, configs = modinput.get_modinput_configs_from_stdin() stanza_name = None input_type = None if configs and "://" in configs[0].get("name", ""): input_type, stanza_name = configs[0].get("name").split("://", 1) - return config_cls(meta_config, settings, log_suffix, stanza_name, - input_type, single_instance=single_instance) + return config_cls( + meta_config, + settings, + log_suffix, + stanza_name, + input_type, + single_instance=single_instance, + ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py index a1b0508..c0620bd 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_consts.py @@ -37,7 +37,7 @@ input_data = "input_data" interval = "interval" data = "data" -batch_size = 'batch_size' +batch_size = "batch_size" time_fmt = "%Y-%m-%dT%H:%M:%S" utc_time_fmt = "%Y-%m-%dT%H:%M:%S.%fZ" @@ -45,9 +45,9 @@ checkpoint_storage_type = "builtin_system_checkpoint_storage_type" # Possible values for checkpoint storage type -checkpoint_auto = 'auto' -checkpoint_kv_storage = 'kv_store' -checkpoint_file = 'file' +checkpoint_auto = "auto" +checkpoint_kv_storage = "kv_store" +checkpoint_file = "file" # For cache file use_cache_file = "builtin_system_use_cache_file" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py index 8358166..65fbf13 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_client.py @@ -14,32 +14,31 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from builtins import next -from builtins import object from . import ta_checkpoint_manager as cp from . 
import ta_data_collector as tdc -def build_event(host=None, - source=None, - sourcetype=None, - time=None, - index=None, - raw_data="", - is_unbroken=False, - is_done=False): +def build_event( + host=None, + source=None, + sourcetype=None, + time=None, + index=None, + raw_data="", + is_unbroken=False, + is_done=False, +): if is_unbroken is False and is_done is True: - raise Exception('is_unbroken=False is_done=True is invalid') - return tdc.event_tuple._make([host, source, sourcetype, time, index, - raw_data, is_unbroken, is_done]) + raise Exception("is_unbroken=False is_done=True is invalid") + return tdc.event_tuple._make( + [host, source, sourcetype, time, index, raw_data, is_unbroken, is_done] + ) -class TaDataClient(object): - def __init__(self, - meta_config, - task_config, - checkpoint_mgr=None, - event_writer=None): +class TaDataClient: + def __init__( + self, meta_config, task_config, checkpoint_mgr=None, event_writer=None + ): self._meta_config = meta_config self._task_config = task_config self._checkpoint_mgr = checkpoint_mgr @@ -56,24 +55,24 @@ def get(self): raise StopIteration -def create_data_collector(dataloader, - tconfig, - meta_configs, - task_config, - data_client_cls, - checkpoint_cls=None): +def create_data_collector( + dataloader, tconfig, meta_configs, task_config, data_client_cls, checkpoint_cls=None +): checkpoint_manager_cls = checkpoint_cls or cp.TACheckPointMgr - return tdc.TADataCollector(tconfig, meta_configs, task_config, - checkpoint_manager_cls, data_client_cls, - dataloader) + return tdc.TADataCollector( + tconfig, + meta_configs, + task_config, + checkpoint_manager_cls, + data_client_cls, + dataloader, + ) def client_adatper(job_func): class TaDataClientAdapter(TaDataClient): - def __init__(self, all_conf_contents, meta_config, task_config, - chp_mgr): - super(TaDataClientAdapter, self).__init__(meta_config, task_config, - chp_mgr) + def __init__(self, all_conf_contents, meta_config, task_config, chp_mgr): + super().__init__(meta_config, task_config, chp_mgr) self._execute_times = 0 self._gen = job_func(self._task_config, chp_mgr) @@ -83,7 +82,7 @@ def stop(self): """ # normaly base class just set self._stop as True - super(TaDataClientAdapter, self).stop() + super().stop() def get(self): """ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py index d81cfd3..7d323d6 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py @@ -14,51 +14,70 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from __future__ import absolute_import -from builtins import object import threading import time from collections import namedtuple -from . 
import ta_consts as c -from ..common import log as stulog from splunktalib.common import util as scu -evt_fmt = ("{0}" - "" - "" - "" - "{4}" - "") - -unbroken_evt_fmt = ("" - "" - "{0}" - "" - "" - "" - "{4}" - "" - "{6}" - "" - "") - -event_tuple = namedtuple('Event', - ['host', 'source', 'sourcetype', 'time', 'index', - 'raw_data', 'is_unbroken', 'is_done']) - - -class TADataCollector(object): - def __init__(self, tconfig, meta_config, task_config, - checkpoint_manager_cls, data_client_cls, data_loader): +from ..common import log as stulog +from . import ta_consts as c + +evt_fmt = ( + "{0}" + "" + "" + "" + "{4}" + "" +) + +unbroken_evt_fmt = ( + "" + '' + "{0}" + "" + "" + "" + "{4}" + "" + "{6}" + "" + "" +) + +event_tuple = namedtuple( + "Event", + [ + "host", + "source", + "sourcetype", + "time", + "index", + "raw_data", + "is_unbroken", + "is_done", + ], +) + + +class TADataCollector: + def __init__( + self, + tconfig, + meta_config, + task_config, + checkpoint_manager_cls, + data_client_cls, + data_loader, + ): self._lock = threading.Lock() self._ta_config = tconfig self._meta_config = meta_config self._task_config = task_config self._stopped = False self._p = self._get_logger_prefix() - self._checkpoint_manager = checkpoint_manager_cls(meta_config, - task_config) + self._checkpoint_manager = checkpoint_manager_cls(meta_config, task_config) self.data_client_cls = data_client_cls self._data_loader = data_loader self._client = None @@ -73,8 +92,7 @@ def get_interval(self): return self._task_config[c.interval] def _get_logger_prefix(self): - pairs = ['{}="{}"'.format(c.stanza_name, self._task_config[ - c.stanza_name])] + pairs = [f'{c.stanza_name}="{self._task_config[c.stanza_name]}"'] return "[{}]".format(" ".join(pairs)) def stop(self): @@ -95,38 +113,49 @@ def _build_event(self, events): assert event.raw_data, "the raw data of events is empty" if event.is_unbroken: evt = unbroken_evt_fmt.format( - event.host or "", event.source or "", event.sourcetype or - "", event.time or "", event.index or "", - scu.escape_cdata(event.raw_data), "" if - event.is_done else "") + event.host or "", + event.source or "", + event.sourcetype or "", + event.time or "", + event.index or "", + scu.escape_cdata(event.raw_data), + "" if event.is_done else "", + ) else: - evt = evt_fmt.format(event.host or "", event.source or "", - event.sourcetype or "", event.time or "", - event.index or "", - scu.escape_cdata(event.raw_data)) + evt = evt_fmt.format( + event.host or "", + event.source or "", + event.sourcetype or "", + event.time or "", + event.index or "", + scu.escape_cdata(event.raw_data), + ) evts.append(evt) return evts def _create_data_client(self): - return self.data_client_cls(self._meta_config, - self._task_config, - self._checkpoint_manager, - self._data_loader.get_event_writer()) + return self.data_client_cls( + self._meta_config, + self._task_config, + self._checkpoint_manager, + self._data_loader.get_event_writer(), + ) def index_data(self): if self._lock.locked(): stulog.logger.debug( "Last round of stanza={} is not done yet".format( - self._task_config[c.stanza_name])) + self._task_config[c.stanza_name] + ) + ) return with self._lock: try: self._do_safe_index() self._checkpoint_manager.close() except Exception: - stulog.logger.exception("{} Failed to index data" - .format(self._p)) - stulog.logger.info("{} End of indexing data".format(self._p)) + stulog.logger.exception(f"{self._p} Failed to index data") + stulog.logger.info(f"{self._p} End of indexing data") if not 
self._ta_config.is_single_instance(): self._data_loader.tear_down() @@ -134,9 +163,10 @@ def _write_events(self, events): evts = self._build_event(events) if evts: if not self._data_loader.write_events(evts): - stulog.logger.info("{} the event queue is closed and the " - "received data will be discarded".format( - self._p)) + stulog.logger.info( + "{} the event queue is closed and the " + "received data will be discarded".format(self._p) + ) return False return True @@ -151,10 +181,10 @@ def _do_safe_index(self): if not self._write_events(events): break except StopIteration: - stulog.logger.info("{} Finished this round".format(self._p)) + stulog.logger.info(f"{self._p} Finished this round") return except Exception: - stulog.logger.exception("{} Failed to get msg".format(self._p)) + stulog.logger.exception(f"{self._p} Failed to get msg") break # in case encounter exception or fail to write events if not self._stopped: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py index ab4edd7..3ad8d5d 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_loader.py @@ -16,22 +16,20 @@ """ Data Loader main entry point """ - - -from future import standard_library -standard_library.install_aliases() -from builtins import object -import queue -import os.path as op import configparser +import os.path as op +import queue +from solnlib import log +from solnlib import timer_queue as tq from splunktalib.concurrent import concurrent_executor as ce -from splunktalib import timer_queue as tq from splunktalib.schedule import job as sjob -from splunktalib.common import log + +# Global logger +logger = log.Logs().get_logger("util") -class TADataLoader(object): +class TADataLoader: """ Data Loader boots all underlying facilities to handle data collection """ @@ -63,7 +61,7 @@ def run(self, jobs): self._executor.start() self._timer_queue.start() self._scheduler.start() - log.logger.info("TADataLoader started.") + logger.info("TADataLoader started.") def _enqueue_io_job(job): job_props = job.get_props() @@ -71,8 +69,7 @@ def _enqueue_io_job(job): self.run_io_jobs((real_job,)) for job in jobs: - j = sjob.Job(_enqueue_io_job, {"real_job": job}, - job.get_interval()) + j = sjob.Job(_enqueue_io_job, {"real_job": job}, job.get_interval()) self._scheduler.add_jobs((j,)) self._wait_for_tear_down() @@ -81,10 +78,10 @@ def _enqueue_io_job(job): job.stop() self._scheduler.tear_down() - self._timer_queue.tear_down() + self._timer_queue.stop() self._executor.tear_down() self._event_writer.tear_down() - log.logger.info("DataLoader stopped.") + logger.info("DataLoader stopped.") def _wait_for_tear_down(self): wakeup_q = self._wakeup_queue @@ -95,13 +92,13 @@ def _wait_for_tear_down(self): pass else: if go_exit: - log.logger.info("DataLoader got stop signal") + logger.info("DataLoader got stop signal") self._stopped = True break def tear_down(self): self._wakeup_queue.put(True) - log.logger.info("DataLoader is going to stop.") + logger.info("DataLoader is going to stop.") def stopped(self): return self._stopped @@ -117,8 +114,7 @@ def run_compute_job_async(self, func, args=(), 
kwargs={}, callback=None): @return: AsyncResult """ - return self._executor.run_compute_func_async(func, args, - kwargs, callback) + return self._executor.run_compute_func_async(func, args, kwargs, callback) def add_timer(self, callback, when, interval): return self._timer_queue.add_timer(callback, when, interval) @@ -135,12 +131,11 @@ def get_event_writer(self): @staticmethod def _read_default_settings(): cur_dir = op.dirname(op.abspath(__file__)) - setting_file = op.join(cur_dir,"../../../","splunktalib", "setting.conf") + setting_file = op.join(cur_dir, "../../../", "splunktalib", "setting.conf") parser = configparser.ConfigParser() parser.read(setting_file) settings = {} - keys = ("process_size", "thread_min_size", "thread_max_size", - "task_queue_size") + keys = ("process_size", "thread_min_size", "thread_max_size", "task_queue_size") for option in keys: try: settings[option] = parser.get("global", option) @@ -151,20 +146,19 @@ def _read_default_settings(): settings[option] = int(settings[option]) except ValueError: settings[option] = -1 - log.logger.debug("settings: %s", settings) + logger.debug("settings: %s", settings) return settings -class GlobalDataLoader(object): - """ Singleton, inited when started""" +class GlobalDataLoader: + """Singleton, inited when started""" __instance = None @staticmethod def get_data_loader(scheduler, writer): if GlobalDataLoader.__instance is None: - GlobalDataLoader.__instance = TADataLoader( - scheduler, writer) + GlobalDataLoader.__instance = TADataLoader(scheduler, writer) return GlobalDataLoader.__instance @staticmethod diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py index d3b8e3c..bb9ddfa 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_helper.py @@ -13,29 +13,23 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import six -from builtins import object import hashlib import json import os.path as op import re from calendar import timegm from datetime import datetime +from functools import lru_cache -import sys -if sys.version_info[0] >= 3: - from functools import lru_cache -else: - from functools32 import lru_cache - +from splunktalib.common import util from splunktaucclib.global_config import GlobalConfig, GlobalConfigSchema -from . import ta_consts as c + from ...splunktacollectorlib import config as sc -from splunktalib.common import util +from . 
import ta_consts as c def utc2timestamp(human_time): - regex1 = "\d{4}-\d{2}-\d{2}.\d{2}:\d{2}:\d{2}" + regex1 = r"\d{4}-\d{2}-\d{2}.\d{2}:\d{2}:\d{2}" match = re.search(regex1, human_time) if match: formated = match.group() @@ -45,7 +39,7 @@ def utc2timestamp(human_time): strped_time = datetime.strptime(formated, c.time_fmt) timestamp = timegm(strped_time.utctimetuple()) - regex2 = "\d{4}-\d{2}-\d{2}.\d{2}:\d{2}:\d{2}(\.\d+)" + regex2 = r"\d{4}-\d{2}-\d{2}.\d{2}:\d{2}:\d{2}(\.\d+)" match = re.search(regex2, human_time) if match: timestamp += float(match.group(1)) @@ -61,17 +55,15 @@ def get_md5(data): :return: """ assert data is not None, "The input cannot be None" - if isinstance(data, six.string_types): - return hashlib.sha256(data.encode('utf-8')).hexdigest() + if isinstance(data, str): + return hashlib.sha256(data.encode("utf-8")).hexdigest() elif isinstance(data, (list, tuple, dict)): - return hashlib.sha256(json.dumps(data).encode('utf-8')).hexdigest() + return hashlib.sha256(json.dumps(data).encode("utf-8")).hexdigest() def get_all_conf_contents(server_uri, sessionkey, settings, input_type=None): schema = GlobalConfigSchema(settings) - global_config = GlobalConfig( - server_uri, sessionkey, schema - ) + global_config = GlobalConfig(server_uri, sessionkey, schema) inputs = global_config.inputs.load(input_type=input_type) configs = global_config.configs.load() settings = global_config.settings.load() @@ -80,10 +72,10 @@ def get_all_conf_contents(server_uri, sessionkey, settings, input_type=None): @lru_cache(maxsize=64) def format_name_for_file(name): - return hashlib.sha256(name.encode('utf-8')).hexdigest() + return hashlib.sha256(name.encode("utf-8")).hexdigest() -class ConfigSchemaHandler(object): +class ConfigSchemaHandler: _app_name = util.get_appname_from_path(op.abspath(__file__)) # Division schema keys. 
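    # (Illustrative note, not part of the upstream patch: a division schema
    # entry is roughly {TYPE: TYPE_SINGLE or TYPE_MULTI, SEPARATOR: ",",
    # REFER: <another endpoint>}; the assertion in _process_division below
    # only demands SEPARATOR when the entry's TYPE is TYPE_MULTI.)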
TYPE = "type" @@ -94,12 +86,13 @@ class ConfigSchemaHandler(object): SEPARATOR = "separator" def __init__(self, meta_configs, client_schema): - self._config = sc.Config(splunkd_uri=meta_configs[c.server_uri], - session_key=meta_configs[c.session_key], - schema=json.dumps(client_schema[ - c.config]), - user="nobody", - app=ConfigSchemaHandler._app_name) + self._config = sc.Config( + splunkd_uri=meta_configs[c.server_uri], + session_key=meta_configs[c.session_key], + schema=json.dumps(client_schema[c.config]), + user="nobody", + app=ConfigSchemaHandler._app_name, + ) self._client_schema = client_schema self._all_conf_contents = {} self._load_conf_contents() @@ -122,7 +115,8 @@ def _divide_settings(self): division_settings = dict() for division_endpoint, division_contents in division_schema.items(): division_settings[division_endpoint] = self._process_division( - division_endpoint, division_contents) + division_endpoint, division_contents + ) return division_settings def _load_conf_contents(self): @@ -133,26 +127,32 @@ def _process_division(self, division_endpoint, division_contents): assert isinstance(division_contents, dict) for division_key, division_value in division_contents.items(): try: - assert self.TYPE in division_value and \ - division_value[self.TYPE] in \ - [self.TYPE_SINGLE, self.TYPE_MULTI] and \ - self.SEPARATOR in division_value if \ - division_value[self.TYPE] == self.TYPE_MULTI else True + assert ( + self.TYPE in division_value + and division_value[self.TYPE] in [self.TYPE_SINGLE, self.TYPE_MULTI] + and self.SEPARATOR in division_value + if division_value[self.TYPE] == self.TYPE_MULTI + else True + ) except Exception: raise Exception("Invalid division schema") - division_metrics.append(DivisionRule(division_endpoint, - division_key, - division_value[self.TYPE], - division_value.get( - self.SEPARATOR, - ), - division_value.get( - self.REFER, - ))) + division_metrics.append( + DivisionRule( + division_endpoint, + division_key, + division_value[self.TYPE], + division_value.get( + self.SEPARATOR, + ), + division_value.get( + self.REFER, + ), + ) + ) return division_metrics -class DivisionRule(object): +class DivisionRule: def __init__(self, endpoint, metric, type, separator, refer): self._endpoint = endpoint self._metric = metric diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py index 55382de..654d7b3 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_mod_input.py @@ -18,25 +18,24 @@ """ This is the main entry point for My TA """ -from __future__ import print_function - -from builtins import str import os.path as op import platform import sys import time +from solnlib import file_monitor as fm +from solnlib import orphan_process_monitor as opm +from solnlib import utils +from splunktalib import modinput +from splunktalib.common import util as sc_util + +from ...common.lib_util import get_app_root_dir, get_mod_input_script_name +from ..common import load_schema_file as ld +from ..common import log as stulog from . import ta_checkpoint_manager as cpmgr from . import ta_config as tc from . import ta_data_client as tdc from . 
import ta_data_loader as dl -from ..common import load_schema_file as ld -from ..common import log as stulog -from ...common.lib_util import get_app_root_dir, get_mod_input_script_name -from splunktalib import file_monitor as fm -from splunktalib import modinput -from splunktalib import orphan_process_monitor as opm -from splunktalib.common import util as utils utils.remove_http_proxy_env_vars() @@ -44,17 +43,22 @@ def do_scheme( - mod_input_name, - schema_para_list=None, - single_instance=True, + mod_input_name, + schema_para_list=None, + single_instance=True, ): """ Feed splunkd the TA's scheme """ builtin_names = { - "name", "index", "sourcetype", "host", "source", - "disabled", "interval" + "name", + "index", + "sourcetype", + "host", + "source", + "disabled", + "interval", } param_string_list = [] @@ -72,13 +76,18 @@ def do_scheme( <arg name="{param}"> <required_on_create>0</required_on_create> <required_on_edit>0</required_on_edit> </arg> - """.format(param=param) + """.format( + param=param + ) ) - description = ("Go to the add-on's configuration UI and configure" - " modular inputs under the Inputs menu.") + description = ( + "Go to the add-on's configuration UI and configure" + " modular inputs under the Inputs menu." + ) - print(""" + print( + """ <scheme> <title>{data_input_title}</title> <description>{description}</description> @@ -95,11 +104,12 @@ def do_scheme( """.format( - single_instance=(str(single_instance)).lower(), - data_input_title=mod_input_name, - param_str=''.join(param_string_list), - description=description, - )) + single_instance=(str(single_instance)).lower(), + data_input_title=mod_input_name, + param_str="".join(param_string_list), + description=description, + ) + ) def _setup_signal_handler(data_loader, ta_short_name): @@ -109,11 +119,11 @@ def _setup_signal_handler(data_loader, ta_short_name): """ def _handle_exit(signum, frame): - stulog.logger.info("{} receives exit signal".format(ta_short_name)) + stulog.logger.info(f"{ta_short_name} receives exit signal") if data_loader is not None: data_loader.tear_down() - utils.handle_tear_down_signals(_handle_exit) + utils.handle_teardown_signals(_handle_exit) def _handle_file_changes(data_loader): @@ -122,8 +132,7 @@ def _handle_file_changes(data_loader): """ def _handle_refresh(changed_files): - stulog.logger.info("Detect {} changed, reboot itself".format( - changed_files)) + stulog.logger.info(f"Detect {changed_files} changed, reboot itself") data_loader.tear_down() return _handle_refresh @@ -142,18 +151,25 @@ def _get_conf_files(settings): return [op.join(ta_dir, "local", f) for f in file_list] -def run(collector_cls, settings, checkpoint_cls=None, config_cls=None, - log_suffix=None, single_instance=True, cc_json_file=None): +def run( + collector_cls, + settings, + checkpoint_cls=None, + config_cls=None, + log_suffix=None, + single_instance=True, + cc_json_file=None, +): """ Main loop.
Run this TA forever """ ta_short_name = settings["meta"]["name"].lower() # This is for stdout flush - utils.disable_stdout_buffer() + sc_util.disable_stdout_buffer() # http://bugs.python.org/issue7980 - time.strptime('2016-01-01', '%Y-%m-%d') + time.strptime("2016-01-01", "%Y-%m-%d") loader = dl.create_data_loader() @@ -162,8 +178,9 @@ def run(collector_cls, settings, checkpoint_cls=None, config_cls=None, # monitor files to reboot try: - monitor = fm.FileMonitor(_handle_file_changes(loader), - _get_conf_files(settings)) + monitor = fm.FileMonitor( + _handle_file_changes(loader), _get_conf_files(settings) + ) loader.add_timer(monitor.check_changes, time.time(), 10) except Exception: stulog.logger.exception("Fail to add files for monitoring") @@ -172,8 +189,9 @@ def run(collector_cls, settings, checkpoint_cls=None, config_cls=None, orphan_checker = opm.OrphanProcessChecker(loader.tear_down) loader.add_timer(orphan_checker.check_orphan, time.time(), 1) - tconfig = tc.create_ta_config(settings, config_cls or tc.TaConfig, - log_suffix, single_instance=single_instance) + tconfig = tc.create_ta_config( + settings, config_cls or tc.TaConfig, log_suffix, single_instance=single_instance + ) task_configs = tconfig.get_task_configs() if not task_configs: @@ -184,17 +202,19 @@ def run(collector_cls, settings, checkpoint_cls=None, config_cls=None, if tconfig.is_shc_member(): # Don't support SHC env - stulog.logger.error("This host is in search head cluster environment , " - "will exit.") + stulog.logger.error( + "This host is in search head cluster environment , " "will exit." + ) return # In this case, use file for checkpoint - if _is_checkpoint_dir_length_exceed_limit(tconfig, - meta_config["checkpoint_dir"]): - stulog.logger.error("The length of the checkpoint directory path: '{}' " - "is too long. The max length we support is {}", - meta_config["checkpoint_dir"], - __CHECKPOINT_DIR_MAX_LEN__) + if _is_checkpoint_dir_length_exceed_limit(tconfig, meta_config["checkpoint_dir"]): + stulog.logger.error( + "The length of the checkpoint directory path: '{}' " + "is too long. 
The max length we support is {}", + meta_config["checkpoint_dir"], + __CHECKPOINT_DIR_MAX_LEN__, + ) return jobs = [ @@ -204,18 +224,20 @@ def run(collector_cls, settings, checkpoint_cls=None, config_cls=None, meta_config, task_config, collector_cls, - checkpoint_cls=checkpoint_cls or cpmgr.TACheckPointMgr + checkpoint_cls=checkpoint_cls or cpmgr.TACheckPointMgr, ) for task_config in task_configs - ] + ] loader.run(jobs) def _is_checkpoint_dir_length_exceed_limit(config, checkpoint_dir): - return platform.system() == 'Windows' \ - and not config.is_search_head() \ - and len(checkpoint_dir) >= __CHECKPOINT_DIR_MAX_LEN__ + return ( + platform.system() == "Windows" + and not config.is_search_head() + and len(checkpoint_dir) >= __CHECKPOINT_DIR_MAX_LEN__ + ) def validate_config(): @@ -238,14 +260,14 @@ def usage(): def main( - collector_cls, - schema_file_path, - log_suffix="modinput", - checkpoint_cls=None, - config_cls=None, - cc_json_file=None, - schema_para_list=None, - single_instance=True + collector_cls, + schema_file_path, + log_suffix="modinput", + checkpoint_cls=None, + config_cls=None, + cc_json_file=None, + schema_para_list=None, + single_instance=True, ): """ Main entry point @@ -263,7 +285,7 @@ def main( do_scheme( mod_input_name=mod_input_name, schema_para_list=schema_para_list, - single_instance=single_instance + single_instance=single_instance, ) elif args[1] == "--validate-arguments": sys.exit(validate_config()) @@ -280,10 +302,9 @@ def main( config_cls=config_cls, log_suffix=log_suffix, single_instance=single_instance, - cc_json_file=cc_json_file + cc_json_file=cc_json_file, ) except Exception: - stulog.logger.exception( - "{} task encounter exception".format(mod_input_name)) - stulog.logger.info("End {} task".format(mod_input_name)) + stulog.logger.exception(f"{mod_input_name} task encounter exception") + stulog.logger.info(f"End {mod_input_name} task") sys.exit(0) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py index 789afce..9aa813c 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py @@ -18,15 +18,13 @@ """ import os -import sys import re +import sys ta_name = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -ta_lib_name = re.sub("[^\w]+", "_", ta_name.lower()) +ta_lib_name = re.sub(r"[^\w]+", "_", ta_name.lower()) assert ta_name or ta_name == "package", "TA name is None or package" pattern = re.compile(r"[\\/]etc[\\/]apps[\\/][^\\/]+[\\/]bin[\\/]?$") new_paths = [path for path in sys.path if not pattern.search(path) or ta_name in path] new_paths.insert(0, os.path.sep.join([os.path.dirname(__file__), ta_lib_name])) sys.path = new_paths - - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py index 3bd67b9..42d2dcb 100755 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py +++ 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/cloudconnectlib/splunktacollectorlib/ta_cloud_connect_client.py @@ -13,36 +13,28 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .data_collection.ta_data_client import TaDataClient +from ..common.log import set_cc_logger from ..splunktacollectorlib.common import log as stulog from ..splunktacollectorlib.data_collection import ta_consts as c -from ..common.log import set_cc_logger +from .data_collection.ta_data_client import TaDataClient class TACloudConnectClient(TaDataClient): - def __init__(self, - meta_config, - task_config, - checkpoint_mgr=None, - event_writer=None - ): - super(TACloudConnectClient, self).__init__(meta_config, - task_config, - checkpoint_mgr, - event_writer) + def __init__( + self, meta_config, task_config, checkpoint_mgr=None, event_writer=None + ): + super().__init__(meta_config, task_config, checkpoint_mgr, event_writer) self._set_log() self._cc_config_file = self._meta_config["cc_json_file"] - from ..core.pipemgr import PipeManager from ..client import CloudConnectClient as Client + from ..core.pipemgr import PipeManager + self._pipe_mgr = PipeManager(event_writer=event_writer) - self._client = Client(self._task_config, self._cc_config_file, - checkpoint_mgr) + self._client = Client(self._task_config, self._cc_config_file, checkpoint_mgr) def _set_log(self): - pairs = ['{}="{}"'.format(c.stanza_name, self._task_config[ - c.stanza_name])] - set_cc_logger(stulog.logger, - logger_prefix="[{}]".format(" ".join(pairs))) + pairs = [f'{c.stanza_name}="{self._task_config[c.stanza_name]}"'] + set_cc_logger(stulog.logger, logger_prefix="[{}]".format(" ".join(pairs))) def is_stopped(self): return self._stop diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/decorator.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/decorator.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/ElementTree.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/ElementTree.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/cElementTree.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/cElementTree.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/common.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/common.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/expatbuilder.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/expatbuilder.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/expatreader.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/expatreader.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/lxml.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/lxml.py old mode 100755 new mode 100644 diff 
--git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/minidom.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/minidom.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/pulldom.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/pulldom.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/sax.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/sax.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/xmlrpc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/defusedxml/xmlrpc.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/distutils-precedence.pth b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/distutils-precedence.pth deleted file mode 100644 index 7f009fe..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/distutils-precedence.pth +++ /dev/null @@ -1 +0,0 @@ -import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'local') == 'local'; enabled and __import__('_distutils_hack').add_shim(); diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/LICENSE.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/LICENSE.txt deleted file mode 100644 index 4c904db..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013-2019 Python Charmers Pty Ltd, Australia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
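A note on the mechanism behind the deleted distutils-precedence.pth above: CPython's site module executes any line of a .pth file that begins with "import" at interpreter startup, which is how setuptools installs its distutils shim before user code can import distutils. Below is a minimal sketch of that one-liner unrolled into ordinary Python; the helper name _install_distutils_shim is ours, while the environment variable and the add_shim() call are quoted from the deleted file:

    import os

    def _install_distutils_shim():
        # "local" (the default) selects setuptools' bundled distutils over
        # the standard-library copy; any other value leaves stdlib distutils.
        if os.environ.get("SETUPTOOLS_USE_DISTUTILS", "local") == "local":
            __import__("_distutils_hack").add_shim()

    _install_distutils_shim()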
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/METADATA deleted file mode 100644 index b6f8357..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/METADATA +++ /dev/null @@ -1,110 +0,0 @@ -Metadata-Version: 2.1 -Name: future -Version: 0.18.2 -Summary: Clean single-source support for Python 3 and 2 -Home-page: https://python-future.org -Author: Ed Schofield -Author-email: ed@pythoncharmers.com -License: MIT -Keywords: future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2 -Platform: UNKNOWN -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: License :: OSI Approved -Classifier: License :: OSI Approved :: MIT License -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Developers -Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.* -License-File: LICENSE.txt - - -future: Easy, safe support for Python 2/3 compatibility -======================================================= - -``future`` is the missing compatibility layer between Python 2 and Python -3. It allows you to use a single, clean Python 3.x-compatible codebase to -support both Python 2 and Python 3 with minimal overhead. - -It is designed to be used as follows:: - - from __future__ import (absolute_import, division, - print_function, unicode_literals) - from builtins import ( - bytes, dict, int, list, object, range, str, - ascii, chr, hex, input, next, oct, open, - pow, round, super, - filter, map, zip) - -followed by predominantly standard, idiomatic Python 3 code that then runs -similarly on Python 2.6/2.7 and Python 3.3+. - -The imports have no effect on Python 3. On Python 2, they shadow the -corresponding builtins, which normally have different semantics on Python 3 -versus 2, to provide their Python 3 semantics. - - -Standard library reorganization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``future`` supports the standard library reorganization (PEP 3108) through the -following Py3 interfaces: - - >>> # Top-level packages with Py3 names provided on Py2: - >>> import html.parser - >>> import queue - >>> import tkinter.dialog - >>> import xmlrpc.client - >>> # etc. - - >>> # Aliases provided for extensions to existing Py2 module names: - >>> from future.standard_library import install_aliases - >>> install_aliases() - - >>> from collections import Counter, OrderedDict # backported to Py2.6 - >>> from collections import UserDict, UserList, UserString - >>> import urllib.request - >>> from itertools import filterfalse, zip_longest - >>> from subprocess import getoutput, getstatusoutput - - -Automatic conversion --------------------- - -An included script called `futurize -`_ aids in converting -code (from either Python 2 or Python 3) to code compatible with both -platforms. 
It is similar to ``python-modernize`` but goes further in -providing Python 3 compatibility through the use of the backported types -and builtin functions in ``future``. - - -Documentation -------------- - -See: http://python-future.org - - -Credits -------- - -:Author: Ed Schofield, Jordan M. Adler, et al -:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte - Ltd, Singapore. http://pythoncharmers.com -:Others: See docs/credits.rst or http://python-future.org/credits.html - - -Licensing ---------- -Copyright 2013-2019 Python Charmers Pty Ltd, Australia. -The software is distributed under an MIT licence. See LICENSE.txt. - - - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/RECORD deleted file mode 100644 index 09cd9f3..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/RECORD +++ /dev/null @@ -1,219 +0,0 @@ -../../bin/futurize,sha256=sXCNiOHh_TxI0O2jpXervN8SaTGHsk3s7DWZVaVd7ps,215 -../../bin/pasteurize,sha256=Z6X36i6UI3tjKEgkWJ-dYlE1hFprHZZaMzOq0Pr6yu8,217 -future-0.18.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -future-0.18.2.dist-info/LICENSE.txt,sha256=kW5WE5LUhHG5wjQ39W4mUvMgyzsRnOqhYu30EBb3Rrk,1083 -future-0.18.2.dist-info/METADATA,sha256=Xjjk3ziBhbMk6Wv0UPOWwVUsKGWBitr7WJrud7vWKss,3729 -future-0.18.2.dist-info/RECORD,, -future-0.18.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 -future-0.18.2.dist-info/entry_points.txt,sha256=-ATQtLUC2gkzrCYqc1Twac093xrI164NuMwsRALJWnM,89 -future-0.18.2.dist-info/top_level.txt,sha256=DT0C3az2gb-uJaj-fs0h4WwHYlJVDp0EvLdud1y5Zyw,38 -future/__init__.py,sha256=TsDq1XoGk6Jfach_rEhwAi07zR5OKYZ6hhUlG5Bj6Ag,2991 -future/backports/__init__.py,sha256=5QXvQ_jc5Xx6p4dSaHnZXPZazBEunKDKhbUjxZ0XD1I,530 -future/backports/_markupbase.py,sha256=MDPTCykLq4J7Aea3PvYotATEE0CG4R_SjlxfJaLXTJM,16215 -future/backports/datetime.py,sha256=I214Vu0cRY8mi8J5aIcsAyQJnWmOKXeLV-QTWSn7VQU,75552 -future/backports/email/__init__.py,sha256=eH3AJr3FkuBy_D6yS1V2K76Q2CQ93y2zmAMWmn8FbHI,2269 -future/backports/email/_encoded_words.py,sha256=m1vTRfxAQdg4VyWO7PF-1ih1mmq97V-BPyHHkuEwSME,8443 -future/backports/email/_header_value_parser.py,sha256=cj_1ce1voLn8H98r9cKqiSLgfFSxCv3_UL3sSvjqgjk,104692 -future/backports/email/_parseaddr.py,sha256=KewEnos0YDM-SYX503z7E1MmVbG5VRaKjxjcl0Ipjbs,17389 -future/backports/email/_policybase.py,sha256=2lJD9xouiz4uHvWGQ6j1nwlwWVQGwwzpy5JZoeQqhUc,14647 -future/backports/email/base64mime.py,sha256=sey6iJA9pHIOdFgoV1p7QAwYVjt8CEkDhITt304-nyI,3729 -future/backports/email/charset.py,sha256=CfE4iV2zAq6MQC0CHXHLnwTNW71zmhNITbzOcfxE4vY,17439 -future/backports/email/encoders.py,sha256=Nn4Pcx1rOdRgoSIzB6T5RWHl5zxClbf32wgE6D0tUt8,2800 -future/backports/email/errors.py,sha256=tRX8PP5g7mk2bAxL1jTCYrbfhD2gPZFNrh4_GJRM8OQ,3680 -future/backports/email/feedparser.py,sha256=bvmhb4cdY-ipextPK2K2sDgMsNvTspmuQfYyCxc4zSc,22736 -future/backports/email/generator.py,sha256=lpaLhZHneguvZ2QgRu7Figkjb7zmY28AGhj9iZTdI7s,19520 -future/backports/email/header.py,sha256=uBHbNKO-yx5I9KBflernJpyy3fX4gImCB1QE7ICApLs,24448 -future/backports/email/headerregistry.py,sha256=ZPbvLKXD0NMLSU4jXlVHfGyGcLMrFm-GQVURu_XHj88,20637 -future/backports/email/iterators.py,sha256=kMRYFGy3SVVpo7HG7JJr2ZAlOoaX6CVPzKYwDSvLfV0,2348 -future/backports/email/message.py,sha256=I6WW5cZDza7uwLOGJSvsDhGZC9K_Q570Lk2gt_vDUXM,35237 
-future/backports/email/mime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -future/backports/email/mime/application.py,sha256=m-5a4mSxu2E32XAImnp9x9eMVX5Vme2iNgn2dMMNyss,1401 -future/backports/email/mime/audio.py,sha256=2ognalFRadcsUYQYMUZbjv5i1xJbFhQN643doMuI7M4,2815 -future/backports/email/mime/base.py,sha256=wV3ClQyMsOqmkXSXbk_wd_zPoPTvBx8kAIzq3rdM4lE,875 -future/backports/email/mime/image.py,sha256=DpQk1sB-IMmO43AF4uadsXyf_y5TdEzJLfyhqR48bIw,1907 -future/backports/email/mime/message.py,sha256=pFsMhXW07aRjsLq1peO847PApWFAl28-Z2Z7BP1Dn74,1429 -future/backports/email/mime/multipart.py,sha256=j4Lf_sJmuwTbfgdQ6R35_t1_ha2DynJBJDvpjwbNObE,1699 -future/backports/email/mime/nonmultipart.py,sha256=Ciba1Z8d2yLDDpxgDJuk3Bb-TqcpE9HCd8KfbW5vgl4,832 -future/backports/email/mime/text.py,sha256=zV98BjoR4S_nX8c47x43LnsnifeGhIfNGwSAh575bs0,1552 -future/backports/email/parser.py,sha256=-115SC3DHZ6lLijWFTxuOnE-GiM2BOYaUSz-QpmvYSo,5312 -future/backports/email/policy.py,sha256=gpcbhVRXuCohkK6MUqopTs1lv4E4-ZVUO6OVncoGEJE,8823 -future/backports/email/quoprimime.py,sha256=w93W5XgdFpyGaDqDBJrnXF_v_npH5r20WuAxmrAzyQg,10923 -future/backports/email/utils.py,sha256=vpfN0E8UjNbNw-2NFBQGCo4TNgrghMsqzpEYW5C_fBs,14270 -future/backports/html/__init__.py,sha256=FKwqFtWMCoGNkhU97OPnR1fZSh6etAKfN1FU1KvXcV8,924 -future/backports/html/entities.py,sha256=kzoRnQyGk_3DgoucHLhL5QL1pglK9nvmxhPIGZFDTnc,75428 -future/backports/html/parser.py,sha256=G2tUObvbHSotNt06JLY-BP1swaZNfDYFd_ENWDjPmRg,19770 -future/backports/http/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -future/backports/http/client.py,sha256=76EbhEZOtvdHFcU-jrjivoff13oQ9IMbdkZEdf5kQzQ,47602 -future/backports/http/cookiejar.py,sha256=_Vy4BPT-h0ZT0R_utGQAFXzuOAdmU9KedGFffyX9wN4,76559 -future/backports/http/cookies.py,sha256=DsyDUGDEbCXAA9Jq6suswSc76uSZqUu39adDDNj8XGw,21581 -future/backports/http/server.py,sha256=1CaMxgzHf9lYhmTJyE7topgjRIlIn9cnjgw8YEvwJV4,45523 -future/backports/misc.py,sha256=AkbED6BdHKnYCmIAontT4zHKTqdPPfJfn35HIs6LDrg,32682 -future/backports/socket.py,sha256=DH1V6IjKPpJ0tln8bYvxvQ7qnvZG-UoQtMA5yVleHiU,15663 -future/backports/socketserver.py,sha256=Twvyk5FqVnOeiNcbVsyMDPTF1mNlkKfyofG7tKxTdD8,24286 -future/backports/test/__init__.py,sha256=9dXxIZnkI095YfHC-XIaVF6d31GjeY1Ag8TEzcFgepM,264 -future/backports/test/badcert.pem,sha256=JioQeRZkHH8hGsWJjAF3U1zQvcWqhyzG6IOEJpTY9SE,1928 -future/backports/test/badkey.pem,sha256=gaBK9px_gG7DmrLKxfD6f6i-toAmARBTVfs-YGFRQF0,2162 -future/backports/test/dh512.pem,sha256=dUTsjtLbK-femrorUrTGF8qvLjhTiT_n4Uo5V6u__Gs,402 -future/backports/test/https_svn_python_org_root.pem,sha256=wOB3Onnc62Iu9kEFd8GcHhd_suucYjpJNA3jyfHeJWA,2569 -future/backports/test/keycert.passwd.pem,sha256=ZBfnVLpbBtAOf_2gCdiQ-yrBHmRsNzSf8VC3UpQZIjg,1830 -future/backports/test/keycert.pem,sha256=xPXi5idPcQVbrhgxBqF2TNGm6sSZ2aLVVEt6DWzplL8,1783 -future/backports/test/keycert2.pem,sha256=DB46FEAYv8BWwQJ-5RzC696FxPN7CON-Qsi-R4poJgc,1795 -future/backports/test/nokia.pem,sha256=s00x0uPDSaa5DHJ_CwzlVhg3OVdJ47f4zgqQdd0SAfQ,1923 -future/backports/test/nullbytecert.pem,sha256=NFRYWhmP_qT3jGfVjR6-iaC-EQdhIFjiXtTLN5ZPKnE,5435 -future/backports/test/nullcert.pem,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -future/backports/test/pystone.py,sha256=fvyoJ_tVovTNaxbJmdJMwr9F6SngY-U4ibULnd_wUqA,7427 -future/backports/test/sha256.pem,sha256=3wB-GQqEc7jq-PYwYAQaPbtTvvr7stk_DVmZxFgehfA,8344 -future/backports/test/ssl_cert.pem,sha256=M607jJNeIeHG9BlTf_jaQkPJI4nOxSJPn-zmEAaW43M,867 
-future/backports/test/ssl_key.passwd.pem,sha256=I_WH4sBw9Vs9Z-BvmuXY0aw8tx8avv6rm5UL4S_pP00,963 -future/backports/test/ssl_key.pem,sha256=VKGU-R3UYaZpVTXl7chWl4vEYEDeob69SfvRTQ8aq_4,916 -future/backports/test/ssl_servers.py,sha256=-pd7HMZljuZfFRAbCAiAP_2G04orITJFj-S9ddr6o84,7209 -future/backports/test/support.py,sha256=zJrb-pz-Wu2dZwnNodg1v3w96zVq7ORuN-hOGOHbdA8,70881 -future/backports/total_ordering.py,sha256=O3M57_IisQ-zW5hW20uxkfk4fTGsr0EF2tAKx3BksQo,1929 -future/backports/urllib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -future/backports/urllib/error.py,sha256=ktikuK9ag4lS4f8Z0k5p1F11qF40N2AiOtjbXiF97ew,2715 -future/backports/urllib/parse.py,sha256=67avrYqV1UK7i_22goRUrvJ8SffzjRdTja9wzq_ynXY,35792 -future/backports/urllib/request.py,sha256=aR9ZMzfhV1C2Qk3wFsGvkwxqtdPTdsJVGRt5DUCwgJ8,96276 -future/backports/urllib/response.py,sha256=ooQyswwbb-9N6IVi1Kwjss1aR-Kvm8ZNezoyVEonp8c,3180 -future/backports/urllib/robotparser.py,sha256=pnAGTbKhdbCq_9yMZp7m8hj5q_NJpyQX6oQIZuYcnkw,6865 -future/backports/xmlrpc/__init__.py,sha256=h61ciVTdVvu8oEUXv4dHf_Tc5XUXDH3RKB1-8fQhSsg,38 -future/backports/xmlrpc/client.py,sha256=6a6Pvx_RVC9gIHDkFOVdREeGaZckOOiWd7T6GyzU3qU,48133 -future/backports/xmlrpc/server.py,sha256=W_RW5hgYbNV2LGbnvngzm7akacRdK-XFY-Cy2HL-qsY,37285 -future/builtins/__init__.py,sha256=jSdOucWfCsfkfTR8Jd4-Ls-YQpJ0AnzUomBxgwuoxNs,1687 -future/builtins/disabled.py,sha256=Ysq74bsmwntpq7dzkwTAD7IHKrkXy66vJlPshVwgVBI,2109 -future/builtins/iterators.py,sha256=l1Zawm2x82oqOuGGtCZRE76Ej98sMlHQwu9fZLK5RrA,1396 -future/builtins/misc.py,sha256=hctlKKWUyN0Eoodxg4ySQHEqARTukOLR4L5K5c6PW9k,4550 -future/builtins/new_min_max.py,sha256=7qQ4iiG4GDgRzjPzzzmg9pdby35Mtt6xNOOsyqHnIGY,1757 -future/builtins/newnext.py,sha256=oxXB8baXqJv29YG40aCS9UXk9zObyoOjya8BJ7NdBJM,2009 -future/builtins/newround.py,sha256=l2EXPAFU3fAsZigJxUH6x66B7jhNaB076-L5FR617R8,3181 -future/builtins/newsuper.py,sha256=LmiUQ_f6NXDIz6v6sDPkoTWl-2Zccy7PpZfQKYtscac,4146 -future/moves/__init__.py,sha256=MsAW69Xp_fqUo4xODufcKM6AZf-ozHaz44WPZdsDFJA,220 -future/moves/_dummy_thread.py,sha256=c8ZRUd8ffvyvGKGGgve5NKc8VdtAWquu8-4FnO2EdvA,175 -future/moves/_markupbase.py,sha256=W9wh_Gu3jDAMIhVBV1ZnCkJwYLHRk_v_su_HLALBkZQ,171 -future/moves/_thread.py,sha256=rwY7L4BZMFPlrp_i6T2Un4_iKYwnrXJ-yV6FJZN8YDo,163 -future/moves/builtins.py,sha256=4sjjKiylecJeL9da_RaBZjdymX2jtMs84oA9lCqb4Ug,281 -future/moves/collections.py,sha256=OKQ-TfUgms_2bnZRn4hrclLDoiN2i-HSWcjs3BC2iY8,417 -future/moves/configparser.py,sha256=TNy226uCbljjU-DjAVo7j7Effbj5zxXvDh0SdXehbzk,146 -future/moves/copyreg.py,sha256=Y3UjLXIMSOxZggXtvZucE9yv4tkKZtVan45z8eix4sU,438 -future/moves/dbm/__init__.py,sha256=_VkvQHC2UcIgZFPRroiX_P0Fs7HNqS_69flR0-oq2B8,488 -future/moves/dbm/dumb.py,sha256=HKdjjtO3EyP9EKi1Hgxh_eUU6yCQ0fBX9NN3n-zb8JE,166 -future/moves/dbm/gnu.py,sha256=XoCSEpZ2QaOgo2h1m80GW7NUgj_b93BKtbcuwgtnaKo,162 -future/moves/dbm/ndbm.py,sha256=OFnreyo_1YHDBl5YUm9gCzKlN1MHgWbfSQAZVls2jaM,162 -future/moves/html/__init__.py,sha256=BSUFSHxXf2kGvHozlnrB1nn6bPE6p4PpN3DwA_Z5geo,1016 -future/moves/html/entities.py,sha256=lVvchdjK_RzRj759eg4RMvGWHfgBbj0tKGOoZ8dbRyY,177 -future/moves/html/parser.py,sha256=V2XpHLKLCxQum3N9xlO3IUccAD7BIykZMqdEcWET3vY,167 -future/moves/http/__init__.py,sha256=Mx1v_Tcks4udHCtDM8q2xnYUiQ01gD7EpPyeQwsP3-Q,71 -future/moves/http/client.py,sha256=hqEBq7GDXZidd1AscKnSyjSoMcuj8rERqGTmD7VheDQ,165 -future/moves/http/cookiejar.py,sha256=Frr9ZZCg-145ymy0VGpiPJhvBEpJtVqRBYPaKhgT1Z4,173 
-future/moves/http/cookies.py,sha256=PPrHa1_oDbu3D_BhJGc6PvMgY1KoxyYq1jqeJwEcMvE,233 -future/moves/http/server.py,sha256=8YQlSCShjAsB5rr5foVvZgp3IzwYFvTmGZCHhBSDtaI,606 -future/moves/itertools.py,sha256=PVxFHRlBQl9ElS0cuGFPcUtj53eHX7Z1DmggzGfgQ6c,158 -future/moves/pickle.py,sha256=r8j9skzfE8ZCeHyh_OB-WucOkRTIHN7zpRM7l7V3qS4,229 -future/moves/queue.py,sha256=uxvLCChF-zxWWgrY1a_wxt8rp2jILdwO4PrnkBW6VTE,160 -future/moves/reprlib.py,sha256=Nt5sUgMQ3jeVIukqSHOvB0UIsl6Y5t-mmT_13mpZmiY,161 -future/moves/socketserver.py,sha256=v8ZLurDxHOgsubYm1iefjlpnnJQcx2VuRUGt9FCJB9k,174 -future/moves/subprocess.py,sha256=oqRSMfFZkxM4MXkt3oD5N6eBwmmJ6rQ9KPhvSQKT_hM,251 -future/moves/sys.py,sha256=HOMRX4Loim75FMbWawd3oEwuGNJR-ClMREEFkVpBsRs,132 -future/moves/test/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110 -future/moves/test/support.py,sha256=6zGgTTXcERyBJIQ04-X-sAe781tVgLVHp3HzmQPy52g,259 -future/moves/tkinter/__init__.py,sha256=jV9vDx3wRl0bsoclU8oSe-5SqHQ3YpCbStmqtXnq1p4,620 -future/moves/tkinter/colorchooser.py,sha256=kprlmpRtvDbW5Gq43H1mi2KmNJ2kuzLQOba0a5EwDkU,333 -future/moves/tkinter/commondialog.py,sha256=mdUbq1IZqOGaSA7_8R367IukDCsMfzXiVHrTQQpp7Z0,333 -future/moves/tkinter/constants.py,sha256=0qRUrZLRPdVxueABL9KTzzEWEsk6xM1rOjxK6OHxXtA,324 -future/moves/tkinter/dialog.py,sha256=ksp-zvs-_A90P9RNHS8S27f1k8f48zG2Bel2jwZV5y0,311 -future/moves/tkinter/dnd.py,sha256=C_Ah0Urnyf2XKE5u-oP6mWi16RzMSXgMA1uhBSAwKY8,306 -future/moves/tkinter/filedialog.py,sha256=RSJFDGOP2AJ4T0ZscJ2hyF9ssOWp9t_S_DtnOmT-WZ8,323 -future/moves/tkinter/font.py,sha256=TXarflhJRxqepaRNSDw6JFUVGz5P1T1C4_uF9VRqj3w,309 -future/moves/tkinter/messagebox.py,sha256=WJt4t83kLmr_UnpCWFuLoyazZr3wAUOEl6ADn3osoEA,327 -future/moves/tkinter/scrolledtext.py,sha256=DRzN8aBAlDBUo1B2KDHzdpRSzXBfH4rOOz0iuHXbQcg,329 -future/moves/tkinter/simpledialog.py,sha256=6MhuVhZCJV4XfPpPSUWKlDLLGEi0Y2ZlGQ9TbsmJFL0,329 -future/moves/tkinter/tix.py,sha256=aNeOfbWSGmcN69UmEGf4tJ-QIxLT6SU5ynzm1iWgepA,302 -future/moves/tkinter/ttk.py,sha256=rRrJpDjcP2gjQNukECu4F026P-CkW-3Ca2tN6Oia-Fw,302 -future/moves/urllib/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110 -future/moves/urllib/error.py,sha256=gfrKzv-6W5OjzNIfjvJaQkxABRLym2KwjfKFXSdDB60,479 -future/moves/urllib/parse.py,sha256=xLLUMIIB5MreCdYzRZ5zIRWrhTRCoMO8RZEH4WPFQDY,1045 -future/moves/urllib/request.py,sha256=ttIzq60PwjRyrLQUGdAtfYvs4fziVwvcLe2Kw-hvE0g,3496 -future/moves/urllib/response.py,sha256=ZEZML0FpbB--GIeBFPvSzbtlVJ6EsR4tCI4qB7D8sFQ,342 -future/moves/urllib/robotparser.py,sha256=j24p6dMNzUpGZtT3BQxwRoE-F88iWmBpKgu0tRV61FQ,179 -future/moves/winreg.py,sha256=2zNAG59QI7vFlCj7kqDh0JrAYTpexOnI55PEAIjYhqo,163 -future/moves/xmlrpc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -future/moves/xmlrpc/client.py,sha256=2PfnL5IbKVwdKP7C8B1OUviEtuBObwoH4pAPfvHIvQc,143 -future/moves/xmlrpc/server.py,sha256=ESDXdpUgTKyeFmCDSnJmBp8zONjJklsRJOvy4OtaALc,143 -future/standard_library/__init__.py,sha256=7paz9IsD5qv_tvk5Rre3YrlA2_2aS1FJfI7UlrzAtWY,27743 -future/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -future/tests/base.py,sha256=7LTAKHJgUxOwmffD1kgcErVt2VouKcldPnq4iruqk_k,19956 -future/types/__init__.py,sha256=5fBxWqf_OTQ8jZ7k2TS34rFH14togeR488F4zBHIQ-s,6831 -future/types/newbytes.py,sha256=D_kNDD9sbNJir2cUxxePiAuw2OW5irxVnu55uHmuK9E,16303 -future/types/newdict.py,sha256=2N7P44cWmWtiDHvlK5ir15mW492gg6uP2n65d5bsDy4,3100 -future/types/newint.py,sha256=hJiv9qUDrjl1xkfzNFNLzafsRMPoFcRFceoivUzVIek,13286 
-future/types/newlist.py,sha256=-H5-fXodd-UQgTFnZBJdwE68CrgIL_jViYdv4w7q2rU,2284 -future/types/newmemoryview.py,sha256=LnARgiKqQ2zLwwDZ3owu1atoonPQkOneWMfxJCwB4_o,712 -future/types/newobject.py,sha256=AX_n8GwlDR2IY-xIwZCvu0Olj_Ca2aS57nlTihnFr-I,3358 -future/types/newopen.py,sha256=lcRNHWZ1UjEn_0_XKis1ZA5U6l-4c-CHlC0WX1sY4NI,810 -future/types/newrange.py,sha256=7sgJaRaC4WIUtZ40K-c1d5QWruyaCWGgTVFadKo8qYA,5294 -future/types/newstr.py,sha256=e0brkurI0IK--4ToQEO4Cz1FECZav4CyUGMKxlrcmK4,15758 -future/utils/__init__.py,sha256=wsvXsKx-DXZichQ10Rdml-CWMqS79RNNynmdvfISpCU,21828 -future/utils/surrogateescape.py,sha256=7u4V4XlW83P5YSAJS2f92YUF8vsWthsiTnmAshOJL_M,6097 -libfuturize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31 -libfuturize/fixer_util.py,sha256=Zhms5G97l40pyG1krQM2lCp-TxnocBdJkB2AbkAFnKY,17494 -libfuturize/fixes/__init__.py,sha256=5KEpUnjVsFCCsr_-zrikvJbLf9zslEJnFTH_5pBc33I,5236 -libfuturize/fixes/fix_UserDict.py,sha256=jL4jXnGaUQTkG8RKfGXbU_HVTkB3MWZMQwUkqMAjB6I,3840 -libfuturize/fixes/fix_absolute_import.py,sha256=vkrF2FyQR5lSz2WmdqywzkEJVTC0eq4gh_REWBKHh7w,3140 -libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py,sha256=Fr219VAzR8KWXc2_bfiqLl10EgxAWjL6cI3Mowt--VU,662 -libfuturize/fixes/fix_basestring.py,sha256=bHkKuMzhr5FMXwjXlMOjsod4S3rQkVdbzhoWV4-tl3Y,394 -libfuturize/fixes/fix_bytes.py,sha256=AhzOJes6EnPwgzboDjvURANbWKqciG6ZGaYW07PYQK8,685 -libfuturize/fixes/fix_cmp.py,sha256=Blq_Z0IGkYiKS83QzZ5wUgpJyZfQiZoEsWJ1VPyXgFY,701 -libfuturize/fixes/fix_division.py,sha256=gnrAi7stquiVUyi_De1H8q--43iQaSUX0CjnOmQ6O2w,228 -libfuturize/fixes/fix_division_safe.py,sha256=Y_HUfQJAxRClXkcfqWP5SFCsRYZOsLUsNjLXlGOA3cQ,3292 -libfuturize/fixes/fix_execfile.py,sha256=I5AcJ6vPZ7i70TChaq9inxqnZ4C04-yJyfAItGa8E3c,921 -libfuturize/fixes/fix_future_builtins.py,sha256=QBCRpD9XA7tbtfP4wmOF2DXquB4lq-eupkQj-QAxp0s,2027 -libfuturize/fixes/fix_future_standard_library.py,sha256=FVtflFt38efHe_SEX6k3m6IYAtKWjA4rAPZrlCv6yA0,733 -libfuturize/fixes/fix_future_standard_library_urllib.py,sha256=Rf81XcAXA-vwNvrhskf5sLExbR--Wkr5fiUcMYGAKzs,1001 -libfuturize/fixes/fix_input.py,sha256=bhaPNtMrZNbjWIDQCR7Iue5BxBj4rf0RJQ9_jiwvb-s,687 -libfuturize/fixes/fix_metaclass.py,sha256=GLB76wbuyUVciDgW9bgNNOBEnLeS_AR-fKABcPBZk6M,9568 -libfuturize/fixes/fix_next_call.py,sha256=01STG86Av9o5QcpQDJ6UbPhvxt9kKrkatiPeddXRgvA,3158 -libfuturize/fixes/fix_object.py,sha256=qalFIjn0VTWXG5sGOOoCvO65omjX5_9d40SUpwUjBdw,407 -libfuturize/fixes/fix_oldstr_wrap.py,sha256=UCR6Q2l-pVqJSrRTnQAWMlaqBoX7oX1VpG_w6Q0XcyY,1214 -libfuturize/fixes/fix_order___future__imports.py,sha256=ACUCw5NEGWvj6XA9rNj8BYha3ktxLvkM5Ssh5cyV644,829 -libfuturize/fixes/fix_print.py,sha256=92s1w2t9SynA3Y1_85-lexSBbgEWJM6lBrhCxVacfDc,3384 -libfuturize/fixes/fix_print_with_import.py,sha256=hVWn70Q1DPMUiHMyEqgUx-6sM1AylLj78v9pMc4LFw8,735 -libfuturize/fixes/fix_raise.py,sha256=mEXpM9sS6tenMmxayfqM-Kp9gUvaztTY61vFaqyMUuo,3884 -libfuturize/fixes/fix_remove_old__future__imports.py,sha256=j4EC1KEVgXhuQAqhYHnAruUjW6uczPjV_fTCSOLMuAw,851 -libfuturize/fixes/fix_unicode_keep_u.py,sha256=M8fcFxHeFnWVOKoQRpkMsnpd9qmUFubI2oFhO4ZPk7A,779 -libfuturize/fixes/fix_unicode_literals_import.py,sha256=wq-hb-9Yx3Az4ol-ylXZJPEDZ81EaPZeIy5VvpA0CEY,367 -libfuturize/fixes/fix_xrange_with_import.py,sha256=f074qStjMz3OtLjt1bKKZSxQnRbbb7HzEbqHt9wgqdw,479 -libfuturize/main.py,sha256=feICmcv0dzWhutvwz0unnIVxusbSlQZFDaxObkHebs8,13733 -libpasteurize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31 
-libpasteurize/fixes/__init__.py,sha256=ccdv-2MGjQMbq8XuEZBndHmbzGRrZnabksjXZLUv044,3719 -libpasteurize/fixes/feature_base.py,sha256=v7yLjBDBUPeNUc-YHGGlIsJDOQzFAM4Vo0RN5F1JHVU,1723 -libpasteurize/fixes/fix_add_all__future__imports.py,sha256=mHet1LgbHn9GfgCYGNZXKo-rseDWreAvUcAjZwdgeTE,676 -libpasteurize/fixes/fix_add_all_future_builtins.py,sha256=scfkY-Sz5j0yDtLYls2ENOcqEMPVxeDm9gFYYPINPB8,1269 -libpasteurize/fixes/fix_add_future_standard_library_import.py,sha256=thTRbkBzy_SJjZ0bJteTp0sBTx8Wr69xFakH4styf7Y,663 -libpasteurize/fixes/fix_annotations.py,sha256=VT_AorKY9AYWYZUZ17_CeUrJlEA7VGkwVLDQlwD1Bxo,1581 -libpasteurize/fixes/fix_division.py,sha256=_TD_c5KniAYqEm11O7NJF0v2WEhYSNkRGcKG_94ZOas,904 -libpasteurize/fixes/fix_features.py,sha256=NZn0n34_MYZpLNwyP1Tf51hOiN58Rg7A8tA9pK1S8-c,2675 -libpasteurize/fixes/fix_fullargspec.py,sha256=VlZuIU6QNrClmRuvC4mtLICL3yMCi-RcGCnS9fD4b-Q,438 -libpasteurize/fixes/fix_future_builtins.py,sha256=SlCK9I9u05m19Lr1wxlJxF8toZ5yu0yXBeDLxUN9_fw,1450 -libpasteurize/fixes/fix_getcwd.py,sha256=uebvTvFboLqsROFCwdnzoP6ThziM0skz9TDXHoJcFsQ,873 -libpasteurize/fixes/fix_imports.py,sha256=U4lIs_5Xp1qqM8mN72ieDkkIdiyALZFyCZsRC8ZmXlM,4944 -libpasteurize/fixes/fix_imports2.py,sha256=bs2V5Yv0v_8xLx-lNj9kNEAK2dLYXUXkZ2hxECg01CU,8580 -libpasteurize/fixes/fix_kwargs.py,sha256=NB_Ap8YJk-9ncoJRbOiPY_VMIigFgVB8m8AuY29DDhE,5991 -libpasteurize/fixes/fix_memoryview.py,sha256=Fwayx_ezpr22tbJ0-QrKdJ-FZTpU-m7y78l1h_N4xxc,551 -libpasteurize/fixes/fix_metaclass.py,sha256=IcE2KjaDG8jUR3FYXECzOC_cr2pr5r95W1NTbMrK8Wc,3260 -libpasteurize/fixes/fix_newstyle.py,sha256=78sazKOHm9DUoMyW4VdvQpMXZhicbXzorVPRhBpSUrM,888 -libpasteurize/fixes/fix_next.py,sha256=VHqcyORRNVqKJ51jJ1OkhwxHuXRgp8qaldyqcMvA4J0,1233 -libpasteurize/fixes/fix_printfunction.py,sha256=NDIfqVmUJBG3H9E6nrnN0cWZK8ch9pL4F-nMexdsa38,401 -libpasteurize/fixes/fix_raise.py,sha256=zQ_AcMsGmCbtKMgrxZGcHLYNscw6tqXFvHQxgqtNbU8,1099 -libpasteurize/fixes/fix_raise_.py,sha256=9STp633frUfYASjYzqhwxx_MXePNmMhfJClowRj8FLY,1225 -libpasteurize/fixes/fix_throw.py,sha256=_ZREVre-WttUvk4sWjrqUNqm9Q1uFaATECN0_-PXKbk,835 -libpasteurize/fixes/fix_unpacking.py,sha256=eMqRe44Nfq8lo0YFL9oKW75dGARmBSmklj4BCS_q1Lo,5946 -libpasteurize/main.py,sha256=dVHYTQQeJonuOFDNrenJZl-rKHgOQKRMPP1OqnJogWQ,8186 -past/__init__.py,sha256=wIiXaAvXl3svDi-fzuy6HDD0VsuCVr4cnqnCr8XINGI,2918 -past/builtins/__init__.py,sha256=7j_4OsUlN6q2eKr14do7mRQ1GwXRoXAMUR0A1fJpAls,1805 -past/builtins/misc.py,sha256=nw62HVSxuAgT-Q2lD3lmgRB9zmFXopS14dZHEv5xpDQ,2627 -past/builtins/noniterators.py,sha256=LtdELnd7KyYdXg7GkW25cgkEPUC0ggZ5AYMtDe9N95I,9370 -past/translation/__init__.py,sha256=j2e6mLeK74KEICqH6P_-tpKqSNZoMwip2toThhSmKpU,17646 -past/types/__init__.py,sha256=RyJlgqg9uJ8oF-kJT9QlfhfdmhiMh3fShmtvd2CQycY,879 -past/types/basestring.py,sha256=qrImcr24wvdDCMvF9x0Tyx8S1lCt6GIwRvzuAmvg_Tg,728 -past/types/olddict.py,sha256=0YtffZ55VY6AyQ_rwu4DZ4vcRsp6dz-dQzczeyN8hLk,2721 -past/types/oldstr.py,sha256=J2sJPC5jWEdpqXPcFwJFNDKn51TKhi86PsLFmJtQr-M,4332 -past/utils/__init__.py,sha256=e8l1sOfdiDJ3dkckBWLNWvC1ahC5BX5haHC2TGdNgA8,2633 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/entry_points.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/entry_points.txt deleted file mode 100644 index 45d1a88..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/entry_points.txt +++ /dev/null @@ -1,4 +0,0 @@ -[console_scripts] -futurize = libfuturize.main:main -pasteurize = 
libpasteurize.main:main - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/top_level.txt deleted file mode 100644 index 58f5843..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/top_level.txt +++ /dev/null @@ -1,4 +0,0 @@ -future -libfuturize -libpasteurize -past diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/__init__.py deleted file mode 100755 index ad419d6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/__init__.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -future: Easy, safe support for Python 2/3 compatibility -======================================================= - -``future`` is the missing compatibility layer between Python 2 and Python -3. It allows you to use a single, clean Python 3.x-compatible codebase to -support both Python 2 and Python 3 with minimal overhead. - -It is designed to be used as follows:: - - from __future__ import (absolute_import, division, - print_function, unicode_literals) - from builtins import ( - bytes, dict, int, list, object, range, str, - ascii, chr, hex, input, next, oct, open, - pow, round, super, - filter, map, zip) - -followed by predominantly standard, idiomatic Python 3 code that then runs -similarly on Python 2.6/2.7 and Python 3.3+. - -The imports have no effect on Python 3. On Python 2, they shadow the -corresponding builtins, which normally have different semantics on Python 3 -versus 2, to provide their Python 3 semantics. - - -Standard library reorganization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``future`` supports the standard library reorganization (PEP 3108) through the -following Py3 interfaces: - - >>> # Top-level packages with Py3 names provided on Py2: - >>> import html.parser - >>> import queue - >>> import tkinter.dialog - >>> import xmlrpc.client - >>> # etc. - - >>> # Aliases provided for extensions to existing Py2 module names: - >>> from future.standard_library import install_aliases - >>> install_aliases() - - >>> from collections import Counter, OrderedDict # backported to Py2.6 - >>> from collections import UserDict, UserList, UserString - >>> import urllib.request - >>> from itertools import filterfalse, zip_longest - >>> from subprocess import getoutput, getstatusoutput - - -Automatic conversion --------------------- - -An included script called `futurize -`_ aids in converting -code (from either Python 2 or Python 3) to code compatible with both -platforms. It is similar to ``python-modernize`` but goes further in -providing Python 3 compatibility through the use of the backported types -and builtin functions in ``future``. - - -Documentation -------------- - -See: http://python-future.org - - -Credits -------- - -:Author: Ed Schofield, Jordan M. Adler, et al -:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte - Ltd, Singapore. http://pythoncharmers.com -:Others: See docs/credits.rst or http://python-future.org/credits.html - - -Licensing ---------- -Copyright 2013-2019 Python Charmers Pty Ltd, Australia. -The software is distributed under an MIT licence. See LICENSE.txt. 
- -""" - -__title__ = 'future' -__author__ = 'Ed Schofield' -__license__ = 'MIT' -__copyright__ = 'Copyright 2013-2019 Python Charmers Pty Ltd' -__ver_major__ = 0 -__ver_minor__ = 18 -__ver_patch__ = 2 -__ver_sub__ = '' -__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__, - __ver_patch__, __ver_sub__) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/__init__.py deleted file mode 100755 index c71e065..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -future.backports package -""" - -from __future__ import absolute_import - -import sys - -__future_module__ = True -from future.standard_library import import_top_level_modules - - -if sys.version_info[0] >= 3: - import_top_level_modules() - - -from .misc import (ceil, - OrderedDict, - Counter, - ChainMap, - check_output, - count, - recursive_repr, - _count_elements, - cmp_to_key - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/_markupbase.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/_markupbase.py deleted file mode 100755 index d51bfc7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/_markupbase.py +++ /dev/null @@ -1,422 +0,0 @@ -"""Shared support for scanning document type declarations in HTML and XHTML. - -Backported for python-future from Python 3.3. Reason: ParserBase is an -old-style class in the Python 2.7 source of markupbase.py, which I suspect -might be the cause of sporadic unit-test failures on travis-ci.org with -test_htmlparser.py. The test failures look like this: - - ====================================================================== - -ERROR: test_attr_entity_replacement (future.tests.test_htmlparser.AttributesStrictTestCase) - ----------------------------------------------------------------------- - -Traceback (most recent call last): - File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 661, in test_attr_entity_replacement - [("starttag", "a", [("b", "&><\"'")])]) - File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 93, in _run_check - collector = self.get_collector() - File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 617, in get_collector - return EventCollector(strict=True) - File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 27, in __init__ - html.parser.HTMLParser.__init__(self, *args, **kw) - File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 135, in __init__ - self.reset() - File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 143, in reset - _markupbase.ParserBase.reset(self) - -TypeError: unbound method reset() must be called with ParserBase instance as first argument (got EventCollector instance instead) - -This module is used as a foundation for the html.parser module. It has no -documented public API and should not be used directly. 
- -""" - -import re - -_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match -_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match -_commentclose = re.compile(r'--\s*>') -_markedsectionclose = re.compile(r']\s*]\s*>') - -# An analysis of the MS-Word extensions is available at -# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf - -_msmarkedsectionclose = re.compile(r']\s*>') - -del re - - -class ParserBase(object): - """Parser base class which provides some common support methods used - by the SGML/HTML and XHTML parsers.""" - - def __init__(self): - if self.__class__ is ParserBase: - raise RuntimeError( - "_markupbase.ParserBase must be subclassed") - - def error(self, message): - raise NotImplementedError( - "subclasses of ParserBase must override error()") - - def reset(self): - self.lineno = 1 - self.offset = 0 - - def getpos(self): - """Return current line number and offset.""" - return self.lineno, self.offset - - # Internal -- update line number and offset. This should be - # called for each piece of data exactly once, in order -- in other - # words the concatenation of all the input strings to this - # function should be exactly the entire input. - def updatepos(self, i, j): - if i >= j: - return j - rawdata = self.rawdata - nlines = rawdata.count("\n", i, j) - if nlines: - self.lineno = self.lineno + nlines - pos = rawdata.rindex("\n", i, j) # Should not fail - self.offset = j-(pos+1) - else: - self.offset = self.offset + j-i - return j - - _decl_otherchars = '' - - # Internal -- parse declaration (for use by subclasses). - def parse_declaration(self, i): - # This is some sort of declaration; in "HTML as - # deployed," this should only be the document type - # declaration (""). - # ISO 8879:1986, however, has more complex - # declaration syntax for elements in , including: - # --comment-- - # [marked section] - # name in the following list: ENTITY, DOCTYPE, ELEMENT, - # ATTLIST, NOTATION, SHORTREF, USEMAP, - # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM - rawdata = self.rawdata - j = i + 2 - assert rawdata[i:j] == "": - # the empty comment - return j + 1 - if rawdata[j:j+1] in ("-", ""): - # Start of comment followed by buffer boundary, - # or just a buffer boundary. - return -1 - # A simple, practical version could look like: ((name|stringlit) S*) + '>' - n = len(rawdata) - if rawdata[j:j+2] == '--': #comment - # Locate --.*-- as the body of the comment - return self.parse_comment(i) - elif rawdata[j] == '[': #marked section - # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section - # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA - # Note that this is extended by Microsoft Office "Save as Web" function - # to include [if...] and [endif]. - return self.parse_marked_section(i) - else: #all other declaration elements - decltype, j = self._scan_name(j, i) - if j < 0: - return j - if decltype == "doctype": - self._decl_otherchars = '' - while j < n: - c = rawdata[j] - if c == ">": - # end of declaration syntax - data = rawdata[i+2:j] - if decltype == "doctype": - self.handle_decl(data) - else: - # According to the HTML5 specs sections "8.2.4.44 Bogus - # comment state" and "8.2.4.45 Markup declaration open - # state", a comment token should be emitted. - # Calling unknown_decl provides more flexibility though. 
- self.unknown_decl(data) - return j + 1 - if c in "\"'": - m = _declstringlit_match(rawdata, j) - if not m: - return -1 # incomplete - j = m.end() - elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": - name, j = self._scan_name(j, i) - elif c in self._decl_otherchars: - j = j + 1 - elif c == "[": - # this could be handled in a separate doctype parser - if decltype == "doctype": - j = self._parse_doctype_subset(j + 1, i) - elif decltype in set(["attlist", "linktype", "link", "element"]): - # must tolerate []'d groups in a content model in an element declaration - # also in data attribute specifications of attlist declaration - # also link type declaration subsets in linktype declarations - # also link attribute specification lists in link declarations - self.error("unsupported '[' char in %s declaration" % decltype) - else: - self.error("unexpected '[' char in declaration") - else: - self.error( - "unexpected %r char in declaration" % rawdata[j]) - if j < 0: - return j - return -1 # incomplete - - # Internal -- parse a marked section - # Override this to handle MS-word extension syntax content - def parse_marked_section(self, i, report=1): - rawdata= self.rawdata - assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()" - sectName, j = self._scan_name( i+3, i ) - if j < 0: - return j - if sectName in set(["temp", "cdata", "ignore", "include", "rcdata"]): - # look for standard ]]> ending - match= _markedsectionclose.search(rawdata, i+3) - elif sectName in set(["if", "else", "endif"]): - # look for MS Office ]> ending - match= _msmarkedsectionclose.search(rawdata, i+3) - else: - self.error('unknown status keyword %r in marked section' % rawdata[i+3:j]) - if not match: - return -1 - if report: - j = match.start(0) - self.unknown_decl(rawdata[i+3: j]) - return match.end(0) - - # Internal -- parse comment, return length or -1 if not terminated - def parse_comment(self, i, report=1): - rawdata = self.rawdata - if rawdata[i:i+4] != '<!--': - self.error('unexpected call to parse_comment()') - # --> delimiter transport-padding - # --> CRLF body-part - for body_part in msgtexts: - # delimiter transport-padding CRLF - self.write(self._NL + '--' + boundary + self._NL) - # body-part - self._fp.write(body_part) - # close-delimiter transport-padding - self.write(self._NL + '--' + boundary + '--') - if msg.epilogue is not None: - self.write(self._NL) - if self._mangle_from_: - epilogue = fcre.sub('>From ', msg.epilogue) - else: - epilogue = msg.epilogue - self._write_lines(epilogue) - - def _handle_multipart_signed(self, msg): - # The contents of signed parts has to stay unmodified in order to keep - # the signature intact per RFC1847 2.1, so we disable header wrapping. - # RDM: This isn't enough to completely preserve the part, but it helps. - p = self.policy - self.policy = p.clone(max_line_length=0) - try: - self._handle_multipart(msg) - finally: - self.policy = p - - def _handle_message_delivery_status(self, msg): - # We can't just write the headers directly to self's file object - # because this will leave an extra newline between the last header - # block and the boundary. Sigh. - blocks = [] - for part in msg.get_payload(): - s = self._new_buffer() - g = self.clone(s) - g.flatten(part, unixfrom=False, linesep=self._NL) - text = s.getvalue() - lines = text.split(self._encoded_NL) - # Strip off the unnecessary trailing empty line - if lines and lines[-1] == self._encoded_EMPTY: - blocks.append(self._encoded_NL.join(lines[:-1])) - else: - blocks.append(text) - # Now join all the blocks with an empty line. This has the lovely - # effect of separating each block with an empty line, but not adding - # an extra one after the last one.
- self._fp.write(self._encoded_NL.join(blocks)) - - def _handle_message(self, msg): - s = self._new_buffer() - g = self.clone(s) - # The payload of a message/rfc822 part should be a multipart sequence - # of length 1. The zeroth element of the list should be the Message - # object for the subpart. Extract that object, stringify it, and - # write it out. - # Except, it turns out, when it's a string instead, which happens when - # and only when HeaderParser is used on a message of mime type - # message/rfc822. Such messages are generated by, for example, - # Groupwise when forwarding unadorned messages. (Issue 7970.) So - # in that case we just emit the string body. - payload = msg._payload - if isinstance(payload, list): - g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL) - payload = s.getvalue() - else: - payload = self._encode(payload) - self._fp.write(payload) - - # This used to be a module level function; we use a classmethod for this - # and _compile_re so we can continue to provide the module level function - # for backward compatibility by doing - # _make_boudary = Generator._make_boundary - # at the end of the module. It *is* internal, so we could drop that... - @classmethod - def _make_boundary(cls, text=None): - # Craft a random boundary. If text is given, ensure that the chosen - # boundary doesn't appear in the text. - token = random.randrange(sys.maxsize) - boundary = ('=' * 15) + (_fmt % token) + '==' - if text is None: - return boundary - b = boundary - counter = 0 - while True: - cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE) - if not cre.search(text): - break - b = boundary + '.' + str(counter) - counter += 1 - return b - - @classmethod - def _compile_re(cls, s, flags): - return re.compile(s, flags) - -class BytesGenerator(Generator): - """Generates a bytes version of a Message object tree. - - Functionally identical to the base Generator except that the output is - bytes and not string. When surrogates were used in the input to encode - bytes, these are decoded back to bytes for output. If the policy has - cte_type set to 7bit, then the message is transformed such that the - non-ASCII bytes are properly content transfer encoded, using the charset - unknown-8bit. - - The outfp object must accept bytes in its write method. - """ - - # Bytes versions of this constant for use in manipulating data from - # the BytesIO buffer. - _encoded_EMPTY = b'' - - def write(self, s): - self._fp.write(str(s).encode('ascii', 'surrogateescape')) - - def _new_buffer(self): - return BytesIO() - - def _encode(self, s): - return s.encode('ascii') - - def _write_headers(self, msg): - # This is almost the same as the string version, except for handling - # strings with 8bit bytes. - for h, v in msg.raw_items(): - self._fp.write(self.policy.fold_binary(h, v)) - # A blank line always separates headers from body - self.write(self._NL) - - def _handle_text(self, msg): - # If the string has surrogates the original source was bytes, so - # just write it back out. 
- if msg._payload is None: - return - if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit': - if self._mangle_from_: - msg._payload = fcre.sub(">From ", msg._payload) - self._write_lines(msg._payload) - else: - super(BytesGenerator,self)._handle_text(msg) - - # Default body handler - _writeBody = _handle_text - - @classmethod - def _compile_re(cls, s, flags): - return re.compile(s.encode('ascii'), flags) - - -_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]' - -class DecodedGenerator(Generator): - """Generates a text representation of a message. - - Like the Generator base class, except that non-text parts are substituted - with a format string representing the part. - """ - def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None): - """Like Generator.__init__() except that an additional optional - argument is allowed. - - Walks through all subparts of a message. If the subpart is of main - type `text', then it prints the decoded payload of the subpart. - - Otherwise, fmt is a format string that is used instead of the message - payload. fmt is expanded with the following keywords (in - %(keyword)s format): - - type : Full MIME type of the non-text part - maintype : Main MIME type of the non-text part - subtype : Sub-MIME type of the non-text part - filename : Filename of the non-text part - description: Description associated with the non-text part - encoding : Content transfer encoding of the non-text part - - The default value for fmt is None, meaning - - [Non-text (%(type)s) part of message omitted, filename %(filename)s] - """ - Generator.__init__(self, outfp, mangle_from_, maxheaderlen) - if fmt is None: - self._fmt = _FMT - else: - self._fmt = fmt - - def _dispatch(self, msg): - for part in msg.walk(): - maintype = part.get_content_maintype() - if maintype == 'text': - print(part.get_payload(decode=False), file=self) - elif maintype == 'multipart': - # Just skip this - pass - else: - print(self._fmt % { - 'type' : part.get_content_type(), - 'maintype' : part.get_content_maintype(), - 'subtype' : part.get_content_subtype(), - 'filename' : part.get_filename('[no filename]'), - 'description': part.get('Content-Description', - '[no description]'), - 'encoding' : part.get('Content-Transfer-Encoding', - '[no encoding]'), - }, file=self) - - -# Helper used by Generator._make_boundary -_width = len(repr(sys.maxsize-1)) -_fmt = '%%0%dd' % _width - -# Backward compatibility -_make_boundary = Generator._make_boundary diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/header.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/header.py deleted file mode 100755 index 63bf038..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/header.py +++ /dev/null @@ -1,581 +0,0 @@ -# Copyright (C) 2002-2007 Python Software Foundation -# Author: Ben Gertzfield, Barry Warsaw -# Contact: email-sig@python.org - -"""Header encoding and decoding functionality.""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import -from future.builtins import bytes, range, str, super, zip - -__all__ = [ - 'Header', - 'decode_header', - 'make_header', - ] - -import re -import binascii - -from future.backports import email -from future.backports.email import base64mime -from future.backports.email.errors import HeaderParseError -import future.backports.email.charset as _charset 
- -# Helpers -from future.backports.email.quoprimime import _max_append, header_decode - -Charset = _charset.Charset - -NL = '\n' -SPACE = ' ' -BSPACE = b' ' -SPACE8 = ' ' * 8 -EMPTYSTRING = '' -MAXLINELEN = 78 -FWS = ' \t' - -USASCII = Charset('us-ascii') -UTF8 = Charset('utf-8') - -# Match encoded-word strings in the form =?charset?q?Hello_World?= -ecre = re.compile(r''' - =\? # literal =? - (?P<charset>[^?]*?) # non-greedy up to the next ? is the charset - \? # literal ? - (?P<encoding>[qb]) # either a "q" or a "b", case insensitive - \? # literal ? - (?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string - \?= # literal ?= - ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE) - -# Field name regexp, including trailing colon, but not separating whitespace, -# according to RFC 2822. Character range is from tilde to exclamation mark. -# For use with .match() -fcre = re.compile(r'[\041-\176]+:$') - -# Find a header embedded in a putative header value. Used to check for -# header injection attack. -_embeded_header = re.compile(r'\n[^ \t]+:') - - -def decode_header(header): - """Decode a message header value without converting charset. - - Returns a list of (string, charset) pairs containing each of the decoded - parts of the header. Charset is None for non-encoded parts of the header, - otherwise a lower-case string containing the name of the character set - specified in the encoded string. - - header may be a string that may or may not contain RFC2047 encoded words, - or it may be a Header object. - - An email.errors.HeaderParseError may be raised when certain decoding error - occurs (e.g. a base64 decoding exception). - """ - # If it is a Header object, we can just return the encoded chunks. - if hasattr(header, '_chunks'): - return [(_charset._encode(string, str(charset)), str(charset)) - for string, charset in header._chunks] - # If no encoding, just return the header with no charset. - if not ecre.search(header): - return [(header, None)] - # First step is to parse all the encoded parts into triplets of the form - # (encoded_string, encoding, charset). For unencoded strings, the last - # two parts will be None. - words = [] - for line in header.splitlines(): - parts = ecre.split(line) - first = True - while parts: - unencoded = parts.pop(0) - if first: - unencoded = unencoded.lstrip() - first = False - if unencoded: - words.append((unencoded, None, None)) - if parts: - charset = parts.pop(0).lower() - encoding = parts.pop(0).lower() - encoded = parts.pop(0) - words.append((encoded, encoding, charset)) - # Now loop over words and remove words that consist of whitespace - # between two encoded strings. - import sys - droplist = [] - for n, w in enumerate(words): - if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace(): - droplist.append(n-1) - for d in reversed(droplist): - del words[d] - - # The next step is to decode each encoded word by applying the reverse - # base64 or quopri transformation. decoded_words is now a list of the - # form (decoded_word, charset). - decoded_words = [] - for encoded_string, encoding, charset in words: - if encoding is None: - # This is an unencoded word.
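decode_header(), whose deletion begins above, turns a raw header value into (string, charset) chunks. A quick illustration with the standard-library email.header.decode_header that this module backports (return values shown are approximate):

from email.header import decode_header

decode_header('=?iso-8859-1?q?p=F6stal?= was here')
# roughly [(b'p\xf6stal', 'iso-8859-1'), (b' was here', None)]
decode_header('plain value')
# [('plain value', None)]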
- decoded_words.append((encoded_string, charset)) - elif encoding == 'q': - word = header_decode(encoded_string) - decoded_words.append((word, charset)) - elif encoding == 'b': - paderr = len(encoded_string) % 4 # Postel's law: add missing padding - if paderr: - encoded_string += '==='[:4 - paderr] - try: - word = base64mime.decode(encoded_string) - except binascii.Error: - raise HeaderParseError('Base64 decoding error') - else: - decoded_words.append((word, charset)) - else: - raise AssertionError('Unexpected encoding: ' + encoding) - # Now convert all words to bytes and collapse consecutive runs of - # similarly encoded words. - collapsed = [] - last_word = last_charset = None - for word, charset in decoded_words: - if isinstance(word, str): - word = bytes(word, 'raw-unicode-escape') - if last_word is None: - last_word = word - last_charset = charset - elif charset != last_charset: - collapsed.append((last_word, last_charset)) - last_word = word - last_charset = charset - elif last_charset is None: - last_word += BSPACE + word - else: - last_word += word - collapsed.append((last_word, last_charset)) - return collapsed - - -def make_header(decoded_seq, maxlinelen=None, header_name=None, - continuation_ws=' '): - """Create a Header from a sequence of pairs as returned by decode_header() - - decode_header() takes a header value string and returns a sequence of - pairs of the format (decoded_string, charset) where charset is the string - name of the character set. - - This function takes one of those sequence of pairs and returns a Header - instance. Optional maxlinelen, header_name, and continuation_ws are as in - the Header constructor. - """ - h = Header(maxlinelen=maxlinelen, header_name=header_name, - continuation_ws=continuation_ws) - for s, charset in decoded_seq: - # None means us-ascii but we can simply pass it on to h.append() - if charset is not None and not isinstance(charset, Charset): - charset = Charset(charset) - h.append(s, charset) - return h - - -class Header(object): - def __init__(self, s=None, charset=None, - maxlinelen=None, header_name=None, - continuation_ws=' ', errors='strict'): - """Create a MIME-compliant header that can contain many character sets. - - Optional s is the initial header value. If None, the initial header - value is not set. You can later append to the header with .append() - method calls. s may be a byte string or a Unicode string, but see the - .append() documentation for semantics. - - Optional charset serves two purposes: it has the same meaning as the - charset argument to the .append() method. It also sets the default - character set for all subsequent .append() calls that omit the charset - argument. If charset is not provided in the constructor, the us-ascii - charset is used both as s's initial charset and as the default for - subsequent .append() calls. - - The maximum line length can be specified explicitly via maxlinelen. For - splitting the first line to a shorter value (to account for the field - header which isn't included in s, e.g. `Subject') pass in the name of - the field in header_name. The default maxlinelen is 78 as recommended - by RFC 2822. - - continuation_ws must be RFC 2822 compliant folding whitespace (usually - either a space or a hard tab) which will be prepended to continuation - lines. - - errors is passed through to the .append() call. 
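make_header(), defined above, is the matching convenience in the other direction: it rebuilds a Header from decode_header() output, so the two compose into a normalising round-trip. A sketch with the standard-library equivalents:

from email.header import decode_header, make_header

h = make_header(decode_header('=?utf-8?q?Caf=C3=A9?= menu'))
str(h)      # 'Café menu'
h.encode()  # re-encoded wire form, e.g. '=?utf-8?q?Caf=C3=A9?= menu'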
- """ - if charset is None: - charset = USASCII - elif not isinstance(charset, Charset): - charset = Charset(charset) - self._charset = charset - self._continuation_ws = continuation_ws - self._chunks = [] - if s is not None: - self.append(s, charset, errors) - if maxlinelen is None: - maxlinelen = MAXLINELEN - self._maxlinelen = maxlinelen - if header_name is None: - self._headerlen = 0 - else: - # Take the separating colon and space into account. - self._headerlen = len(header_name) + 2 - - def __str__(self): - """Return the string value of the header.""" - self._normalize() - uchunks = [] - lastcs = None - lastspace = None - for string, charset in self._chunks: - # We must preserve spaces between encoded and non-encoded word - # boundaries, which means for us we need to add a space when we go - # from a charset to None/us-ascii, or from None/us-ascii to a - # charset. Only do this for the second and subsequent chunks. - # Don't add a space if the None/us-ascii string already has - # a space (trailing or leading depending on transition) - nextcs = charset - if nextcs == _charset.UNKNOWN8BIT: - original_bytes = string.encode('ascii', 'surrogateescape') - string = original_bytes.decode('ascii', 'replace') - if uchunks: - hasspace = string and self._nonctext(string[0]) - if lastcs not in (None, 'us-ascii'): - if nextcs in (None, 'us-ascii') and not hasspace: - uchunks.append(SPACE) - nextcs = None - elif nextcs not in (None, 'us-ascii') and not lastspace: - uchunks.append(SPACE) - lastspace = string and self._nonctext(string[-1]) - lastcs = nextcs - uchunks.append(string) - return EMPTYSTRING.join(uchunks) - - # Rich comparison operators for equality only. BAW: does it make sense to - # have or explicitly disable <, <=, >, >= operators? - def __eq__(self, other): - # other may be a Header or a string. Both are fine so coerce - # ourselves to a unicode (of the unencoded header value), swap the - # args and do another comparison. - return other == str(self) - - def __ne__(self, other): - return not self == other - - def append(self, s, charset=None, errors='strict'): - """Append a string to the MIME header. - - Optional charset, if given, should be a Charset instance or the name - of a character set (which will be converted to a Charset instance). A - value of None (the default) means that the charset given in the - constructor is used. - - s may be a byte string or a Unicode string. If it is a byte string - (i.e. isinstance(s, str) is false), then charset is the encoding of - that byte string, and a UnicodeError will be raised if the string - cannot be decoded with that charset. If s is a Unicode string, then - charset is a hint specifying the character set of the characters in - the string. In either case, when producing an RFC 2822 compliant - header using RFC 2047 rules, the string will be encoded using the - output codec of the charset. If the string cannot be encoded to the - output codec, a UnicodeError will be raised. - - Optional `errors' is passed as the errors argument to the decode - call if s is a byte string. - """ - if charset is None: - charset = self._charset - elif not isinstance(charset, Charset): - charset = Charset(charset) - if not isinstance(s, str): - input_charset = charset.input_codec or 'us-ascii' - if input_charset == _charset.UNKNOWN8BIT: - s = s.decode('us-ascii', 'surrogateescape') - else: - s = s.decode(input_charset, errors) - # Ensure that the bytes we're storing can be decoded to the output - # character set, otherwise an early error is raised. 
- output_charset = charset.output_codec or 'us-ascii' - if output_charset != _charset.UNKNOWN8BIT: - try: - s.encode(output_charset, errors) - except UnicodeEncodeError: - if output_charset!='us-ascii': - raise - charset = UTF8 - self._chunks.append((s, charset)) - - def _nonctext(self, s): - """True if string s is not a ctext character of RFC822. - """ - return s.isspace() or s in ('(', ')', '\\') - - def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'): - r"""Encode a message header into an RFC-compliant format. - - There are many issues involved in converting a given string for use in - an email header. Only certain character sets are readable in most - email clients, and as header strings can only contain a subset of - 7-bit ASCII, care must be taken to properly convert and encode (with - Base64 or quoted-printable) header strings. In addition, there is a - 75-character length limit on any given encoded header field, so - line-wrapping must be performed, even with double-byte character sets. - - Optional maxlinelen specifies the maximum length of each generated - line, exclusive of the linesep string. Individual lines may be longer - than maxlinelen if a folding point cannot be found. The first line - will be shorter by the length of the header name plus ": " if a header - name was specified at Header construction time. The default value for - maxlinelen is determined at header construction time. - - Optional splitchars is a string containing characters which should be - given extra weight by the splitting algorithm during normal header - wrapping. This is in very rough support of RFC 2822's `higher level - syntactic breaks': split points preceded by a splitchar are preferred - during line splitting, with the characters preferred in the order in - which they appear in the string. Space and tab may be included in the - string to indicate whether preference should be given to one over the - other as a split point when other split chars do not appear in the line - being split. Splitchars does not affect RFC 2047 encoded lines. - - Optional linesep is a string to be used to separate the lines of - the value. The default value is the most useful for typical - Python applications, but it can be set to \r\n to produce RFC-compliant - line separators when needed. - """ - self._normalize() - if maxlinelen is None: - maxlinelen = self._maxlinelen - # A maxlinelen of 0 means don't wrap. For all practical purposes, - # choosing a huge number here accomplishes that and makes the - # _ValueFormatter algorithm much simpler. 
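Header.encode(), whose docstring appears above, is where the folding described there actually happens. A short sketch of the knobs it exposes, using the standard-library email.header.Header (the subject text is invented):

from email.header import Header

h = Header('quarterly status report for the western region',
           header_name='Subject')
h.encode(maxlinelen=30)                  # folded at FWS, joined with '\n'
h.encode(maxlinelen=30, linesep='\r\n')  # RFC-compliant line separators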
- if maxlinelen == 0: - maxlinelen = 1000000 - formatter = _ValueFormatter(self._headerlen, maxlinelen, - self._continuation_ws, splitchars) - lastcs = None - hasspace = lastspace = None - for string, charset in self._chunks: - if hasspace is not None: - hasspace = string and self._nonctext(string[0]) - import sys - if lastcs not in (None, 'us-ascii'): - if not hasspace or charset not in (None, 'us-ascii'): - formatter.add_transition() - elif charset not in (None, 'us-ascii') and not lastspace: - formatter.add_transition() - lastspace = string and self._nonctext(string[-1]) - lastcs = charset - hasspace = False - lines = string.splitlines() - if lines: - formatter.feed('', lines[0], charset) - else: - formatter.feed('', '', charset) - for line in lines[1:]: - formatter.newline() - if charset.header_encoding is not None: - formatter.feed(self._continuation_ws, ' ' + line.lstrip(), - charset) - else: - sline = line.lstrip() - fws = line[:len(line)-len(sline)] - formatter.feed(fws, sline, charset) - if len(lines) > 1: - formatter.newline() - if self._chunks: - formatter.add_transition() - value = formatter._str(linesep) - if _embeded_header.search(value): - raise HeaderParseError("header value appears to contain " - "an embedded header: {!r}".format(value)) - return value - - def _normalize(self): - # Step 1: Normalize the chunks so that all runs of identical charsets - # get collapsed into a single unicode string. - chunks = [] - last_charset = None - last_chunk = [] - for string, charset in self._chunks: - if charset == last_charset: - last_chunk.append(string) - else: - if last_charset is not None: - chunks.append((SPACE.join(last_chunk), last_charset)) - last_chunk = [string] - last_charset = charset - if last_chunk: - chunks.append((SPACE.join(last_chunk), last_charset)) - self._chunks = chunks - - -class _ValueFormatter(object): - def __init__(self, headerlen, maxlen, continuation_ws, splitchars): - self._maxlen = maxlen - self._continuation_ws = continuation_ws - self._continuation_ws_len = len(continuation_ws) - self._splitchars = splitchars - self._lines = [] - self._current_line = _Accumulator(headerlen) - - def _str(self, linesep): - self.newline() - return linesep.join(self._lines) - - def __str__(self): - return self._str(NL) - - def newline(self): - end_of_line = self._current_line.pop() - if end_of_line != (' ', ''): - self._current_line.push(*end_of_line) - if len(self._current_line) > 0: - if self._current_line.is_onlyws(): - self._lines[-1] += str(self._current_line) - else: - self._lines.append(str(self._current_line)) - self._current_line.reset() - - def add_transition(self): - self._current_line.push(' ', '') - - def feed(self, fws, string, charset): - # If the charset has no header encoding (i.e. it is an ASCII encoding) - # then we must split the header at the "highest level syntactic break" - # possible. Note that we don't have a lot of smarts about field - # syntax; we just try to break on semi-colons, then commas, then - # whitespace. Eventually, this should be pluggable. - if charset.header_encoding is None: - self._ascii_split(fws, string, self._splitchars) - return - # Otherwise, we're doing either a Base64 or a quoted-printable - # encoding which means we don't need to split the line on syntactic - # breaks. We can basically just find enough characters to fit on the - # current line, minus the RFC 2047 chrome. 
What makes this trickier - # though is that we have to split at octet boundaries, not character - # boundaries but it's only safe to split at character boundaries so at - # best we can only get close. - encoded_lines = charset.header_encode_lines(string, self._maxlengths()) - # The first element extends the current line, but if it's None then - # nothing more fit on the current line so start a new line. - try: - first_line = encoded_lines.pop(0) - except IndexError: - # There are no encoded lines, so we're done. - return - if first_line is not None: - self._append_chunk(fws, first_line) - try: - last_line = encoded_lines.pop() - except IndexError: - # There was only one line. - return - self.newline() - self._current_line.push(self._continuation_ws, last_line) - # Everything else are full lines in themselves. - for line in encoded_lines: - self._lines.append(self._continuation_ws + line) - - def _maxlengths(self): - # The first line's length. - yield self._maxlen - len(self._current_line) - while True: - yield self._maxlen - self._continuation_ws_len - - def _ascii_split(self, fws, string, splitchars): - # The RFC 2822 header folding algorithm is simple in principle but - # complex in practice. Lines may be folded any place where "folding - # white space" appears by inserting a linesep character in front of the - # FWS. The complication is that not all spaces or tabs qualify as FWS, - # and we are also supposed to prefer to break at "higher level - # syntactic breaks". We can't do either of these without intimate - # knowledge of the structure of structured headers, which we don't have - # here. So the best we can do here is prefer to break at the specified - # splitchars, and hope that we don't choose any spaces or tabs that - # aren't legal FWS. (This is at least better than the old algorithm, - # where we would sometimes *introduce* FWS after a splitchar, or the - # algorithm before that, where we would turn all white space runs into - # single spaces or tabs.) - parts = re.split("(["+FWS+"]+)", fws+string) - if parts[0]: - parts[:0] = [''] - else: - parts.pop(0) - for fws, part in zip(*[iter(parts)]*2): - self._append_chunk(fws, part) - - def _append_chunk(self, fws, string): - self._current_line.push(fws, string) - if len(self._current_line) > self._maxlen: - # Find the best split point, working backward from the end. - # There might be none, on a long first line. - for ch in self._splitchars: - for i in range(self._current_line.part_count()-1, 0, -1): - if ch.isspace(): - fws = self._current_line[i][0] - if fws and fws[0]==ch: - break - prevpart = self._current_line[i-1][1] - if prevpart and prevpart[-1]==ch: - break - else: - continue - break - else: - fws, part = self._current_line.pop() - if self._current_line._initial_size > 0: - # There will be a header, so leave it on a line by itself. - self.newline() - if not fws: - # We don't use continuation_ws here because the whitespace - # after a header should always be a space. 
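_ascii_split() above leans on two compact idioms: re.split() with a capturing group keeps the separators in the result, and zip(*[iter(parts)]*2) walks that result two items at a time as (fws, part) pairs. A standalone illustration:

import re

parts = re.split(r'([ \t]+)', 'Hello  world again')
# ['Hello', '  ', 'world', ' ', 'again']
parts[:0] = ['']                 # same re-alignment the method performs
list(zip(*[iter(parts)] * 2))
# [('', 'Hello'), ('  ', 'world'), (' ', 'again')]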
- fws = ' ' - self._current_line.push(fws, part) - return - remainder = self._current_line.pop_from(i) - self._lines.append(str(self._current_line)) - self._current_line.reset(remainder) - - -class _Accumulator(list): - - def __init__(self, initial_size=0): - self._initial_size = initial_size - super().__init__() - - def push(self, fws, string): - self.append((fws, string)) - - def pop_from(self, i=0): - popped = self[i:] - self[i:] = [] - return popped - - def pop(self): - if self.part_count()==0: - return ('', '') - return super().pop() - - def __len__(self): - return sum((len(fws)+len(part) for fws, part in self), - self._initial_size) - - def __str__(self): - return EMPTYSTRING.join((EMPTYSTRING.join((fws, part)) - for fws, part in self)) - - def reset(self, startval=None): - if startval is None: - startval = [] - self[:] = startval - self._initial_size = 0 - - def is_onlyws(self): - return self._initial_size==0 and (not self or str(self).isspace()) - - def part_count(self): - return super().__len__() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/headerregistry.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/headerregistry.py deleted file mode 100755 index 9aaad65..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/headerregistry.py +++ /dev/null @@ -1,592 +0,0 @@ -"""Representing and manipulating email headers via custom objects. - -This module provides an implementation of the HeaderRegistry API. -The implementation is designed to flexibly follow RFC5322 rules. - -Eventually HeaderRegistry will be a public API, but it isn't yet, -and will probably change some before that happens. - -""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from future.builtins import super -from future.builtins import str -from future.utils import text_to_native_str -from future.backports.email import utils -from future.backports.email import errors -from future.backports.email import _header_value_parser as parser - -class Address(object): - - def __init__(self, display_name='', username='', domain='', addr_spec=None): - """Create an object represeting a full email address. - - An address can have a 'display_name', a 'username', and a 'domain'. In - addition to specifying the username and domain separately, they may be - specified together by using the addr_spec keyword *instead of* the - username and domain keywords. If an addr_spec string is specified it - must be properly quoted according to RFC 5322 rules; an error will be - raised if it is not. - - An Address object has display_name, username, domain, and addr_spec - attributes, all of which are read-only. The addr_spec and the string - value of the object are both quoted according to RFC5322 rules, but - without any Content Transfer Encoding. - - """ - # This clause with its potential 'raise' may only happen when an - # application program creates an Address object using an addr_spec - # keyword. The email library code itself must always supply username - # and domain. 
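The Address class whose deletion starts here lives on unchanged as email.headerregistry.Address in the standard library (public since Python 3.6), so the constructor contract described above can be exercised directly:

from email.headerregistry import Address

a = Address(display_name='Ford Prefect', username='ford',
            domain='example.com')
str(a)        # 'Ford Prefect <ford@example.com>'
a.addr_spec   # 'ford@example.com'
# Mixing addr_spec with username/domain raises TypeError, per the
# clause at the top of __init__:
# Address(addr_spec='ford@example.com', username='x')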
- if addr_spec is not None: - if username or domain: - raise TypeError("addrspec specified when username and/or " - "domain also specified") - a_s, rest = parser.get_addr_spec(addr_spec) - if rest: - raise ValueError("Invalid addr_spec; only '{}' " - "could be parsed from '{}'".format( - a_s, addr_spec)) - if a_s.all_defects: - raise a_s.all_defects[0] - username = a_s.local_part - domain = a_s.domain - self._display_name = display_name - self._username = username - self._domain = domain - - @property - def display_name(self): - return self._display_name - - @property - def username(self): - return self._username - - @property - def domain(self): - return self._domain - - @property - def addr_spec(self): - """The addr_spec (username@domain) portion of the address, quoted - according to RFC 5322 rules, but with no Content Transfer Encoding. - """ - nameset = set(self.username) - if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS): - lp = parser.quote_string(self.username) - else: - lp = self.username - if self.domain: - return lp + '@' + self.domain - if not lp: - return '<>' - return lp - - def __repr__(self): - return "Address(display_name={!r}, username={!r}, domain={!r})".format( - self.display_name, self.username, self.domain) - - def __str__(self): - nameset = set(self.display_name) - if len(nameset) > len(nameset-parser.SPECIALS): - disp = parser.quote_string(self.display_name) - else: - disp = self.display_name - if disp: - addr_spec = '' if self.addr_spec=='<>' else self.addr_spec - return "{} <{}>".format(disp, addr_spec) - return self.addr_spec - - def __eq__(self, other): - if type(other) != type(self): - return False - return (self.display_name == other.display_name and - self.username == other.username and - self.domain == other.domain) - - -class Group(object): - - def __init__(self, display_name=None, addresses=None): - """Create an object representing an address group. - - An address group consists of a display_name followed by colon and an - list of addresses (see Address) terminated by a semi-colon. The Group - is created by specifying a display_name and a possibly empty list of - Address objects. A Group can also be used to represent a single - address that is not in a group, which is convenient when manipulating - lists that are a combination of Groups and individual Addresses. In - this case the display_name should be set to None. In particular, the - string representation of a Group whose display_name is None is the same - as the Address object, if there is one and only one Address object in - the addresses list. 
- - """ - self._display_name = display_name - self._addresses = tuple(addresses) if addresses else tuple() - - @property - def display_name(self): - return self._display_name - - @property - def addresses(self): - return self._addresses - - def __repr__(self): - return "Group(display_name={!r}, addresses={!r}".format( - self.display_name, self.addresses) - - def __str__(self): - if self.display_name is None and len(self.addresses)==1: - return str(self.addresses[0]) - disp = self.display_name - if disp is not None: - nameset = set(disp) - if len(nameset) > len(nameset-parser.SPECIALS): - disp = parser.quote_string(disp) - adrstr = ", ".join(str(x) for x in self.addresses) - adrstr = ' ' + adrstr if adrstr else adrstr - return "{}:{};".format(disp, adrstr) - - def __eq__(self, other): - if type(other) != type(self): - return False - return (self.display_name == other.display_name and - self.addresses == other.addresses) - - -# Header Classes # - -class BaseHeader(str): - - """Base class for message headers. - - Implements generic behavior and provides tools for subclasses. - - A subclass must define a classmethod named 'parse' that takes an unfolded - value string and a dictionary as its arguments. The dictionary will - contain one key, 'defects', initialized to an empty list. After the call - the dictionary must contain two additional keys: parse_tree, set to the - parse tree obtained from parsing the header, and 'decoded', set to the - string value of the idealized representation of the data from the value. - (That is, encoded words are decoded, and values that have canonical - representations are so represented.) - - The defects key is intended to collect parsing defects, which the message - parser will subsequently dispose of as appropriate. The parser should not, - insofar as practical, raise any errors. Defects should be added to the - list instead. The standard header parsers register defects for RFC - compliance issues, for obsolete RFC syntax, and for unrecoverable parsing - errors. - - The parse method may add additional keys to the dictionary. In this case - the subclass must define an 'init' method, which will be passed the - dictionary as its keyword arguments. The method should use (usually by - setting them as the value of similarly named attributes) and remove all the - extra keys added by its parse method, and then use super to call its parent - class with the remaining arguments and keywords. - - The subclass should also make sure that a 'max_count' attribute is defined - that is either None or 1. XXX: need to better define this API. 
- - """ - - def __new__(cls, name, value): - kwds = {'defects': []} - cls.parse(value, kwds) - if utils._has_surrogates(kwds['decoded']): - kwds['decoded'] = utils._sanitize(kwds['decoded']) - self = str.__new__(cls, kwds['decoded']) - # del kwds['decoded'] - self.init(name, **kwds) - return self - - def init(self, name, **_3to2kwargs): - defects = _3to2kwargs['defects']; del _3to2kwargs['defects'] - parse_tree = _3to2kwargs['parse_tree']; del _3to2kwargs['parse_tree'] - self._name = name - self._parse_tree = parse_tree - self._defects = defects - - @property - def name(self): - return self._name - - @property - def defects(self): - return tuple(self._defects) - - def __reduce__(self): - return ( - _reconstruct_header, - ( - self.__class__.__name__, - self.__class__.__bases__, - str(self), - ), - self.__dict__) - - @classmethod - def _reconstruct(cls, value): - return str.__new__(cls, value) - - def fold(self, **_3to2kwargs): - policy = _3to2kwargs['policy']; del _3to2kwargs['policy'] - """Fold header according to policy. - - The parsed representation of the header is folded according to - RFC5322 rules, as modified by the policy. If the parse tree - contains surrogateescaped bytes, the bytes are CTE encoded using - the charset 'unknown-8bit". - - Any non-ASCII characters in the parse tree are CTE encoded using - charset utf-8. XXX: make this a policy setting. - - The returned value is an ASCII-only string possibly containing linesep - characters, and ending with a linesep character. The string includes - the header name and the ': ' separator. - - """ - # At some point we need to only put fws here if it was in the source. - header = parser.Header([ - parser.HeaderLabel([ - parser.ValueTerminal(self.name, 'header-name'), - parser.ValueTerminal(':', 'header-sep')]), - parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), - self._parse_tree]) - return header.fold(policy=policy) - - -def _reconstruct_header(cls_name, bases, value): - return type(text_to_native_str(cls_name), bases, {})._reconstruct(value) - - -class UnstructuredHeader(object): - - max_count = None - value_parser = staticmethod(parser.get_unstructured) - - @classmethod - def parse(cls, value, kwds): - kwds['parse_tree'] = cls.value_parser(value) - kwds['decoded'] = str(kwds['parse_tree']) - - -class UniqueUnstructuredHeader(UnstructuredHeader): - - max_count = 1 - - -class DateHeader(object): - - """Header whose value consists of a single timestamp. - - Provides an additional attribute, datetime, which is either an aware - datetime using a timezone, or a naive datetime if the timezone - in the input string is -0000. Also accepts a datetime as input. - The 'value' attribute is the normalized form of the timestamp, - which means it is the output of format_datetime on the datetime. - """ - - max_count = None - - # This is used only for folding, not for creating 'decoded'. 
- value_parser = staticmethod(parser.get_unstructured) - - @classmethod - def parse(cls, value, kwds): - if not value: - kwds['defects'].append(errors.HeaderMissingRequiredValue()) - kwds['datetime'] = None - kwds['decoded'] = '' - kwds['parse_tree'] = parser.TokenList() - return - if isinstance(value, str): - value = utils.parsedate_to_datetime(value) - kwds['datetime'] = value - kwds['decoded'] = utils.format_datetime(kwds['datetime']) - kwds['parse_tree'] = cls.value_parser(kwds['decoded']) - - def init(self, *args, **kw): - self._datetime = kw.pop('datetime') - super().init(*args, **kw) - - @property - def datetime(self): - return self._datetime - - -class UniqueDateHeader(DateHeader): - - max_count = 1 - - -class AddressHeader(object): - - max_count = None - - @staticmethod - def value_parser(value): - address_list, value = parser.get_address_list(value) - assert not value, 'this should not happen' - return address_list - - @classmethod - def parse(cls, value, kwds): - if isinstance(value, str): - # We are translating here from the RFC language (address/mailbox) - # to our API language (group/address). - kwds['parse_tree'] = address_list = cls.value_parser(value) - groups = [] - for addr in address_list.addresses: - groups.append(Group(addr.display_name, - [Address(mb.display_name or '', - mb.local_part or '', - mb.domain or '') - for mb in addr.all_mailboxes])) - defects = list(address_list.all_defects) - else: - # Assume it is Address/Group stuff - if not hasattr(value, '__iter__'): - value = [value] - groups = [Group(None, [item]) if not hasattr(item, 'addresses') - else item - for item in value] - defects = [] - kwds['groups'] = groups - kwds['defects'] = defects - kwds['decoded'] = ', '.join([str(item) for item in groups]) - if 'parse_tree' not in kwds: - kwds['parse_tree'] = cls.value_parser(kwds['decoded']) - - def init(self, *args, **kw): - self._groups = tuple(kw.pop('groups')) - self._addresses = None - super().init(*args, **kw) - - @property - def groups(self): - return self._groups - - @property - def addresses(self): - if self._addresses is None: - self._addresses = tuple([address for group in self._groups - for address in group.addresses]) - return self._addresses - - -class UniqueAddressHeader(AddressHeader): - - max_count = 1 - - -class SingleAddressHeader(AddressHeader): - - @property - def address(self): - if len(self.addresses)!=1: - raise ValueError(("value of single address header {} is not " - "a single address").format(self.name)) - return self.addresses[0] - - -class UniqueSingleAddressHeader(SingleAddressHeader): - - max_count = 1 - - -class MIMEVersionHeader(object): - - max_count = 1 - - value_parser = staticmethod(parser.parse_mime_version) - - @classmethod - def parse(cls, value, kwds): - kwds['parse_tree'] = parse_tree = cls.value_parser(value) - kwds['decoded'] = str(parse_tree) - kwds['defects'].extend(parse_tree.all_defects) - kwds['major'] = None if parse_tree.minor is None else parse_tree.major - kwds['minor'] = parse_tree.minor - if parse_tree.minor is not None: - kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor']) - else: - kwds['version'] = None - - def init(self, *args, **kw): - self._version = kw.pop('version') - self._major = kw.pop('major') - self._minor = kw.pop('minor') - super().init(*args, **kw) - - @property - def major(self): - return self._major - - @property - def minor(self): - return self._minor - - @property - def version(self): - return self._version - - -class ParameterizedMIMEHeader(object): - - # Mixin that handles 
the params dict. Must be subclassed and - # a property value_parser for the specific header provided. - - max_count = 1 - - @classmethod - def parse(cls, value, kwds): - kwds['parse_tree'] = parse_tree = cls.value_parser(value) - kwds['decoded'] = str(parse_tree) - kwds['defects'].extend(parse_tree.all_defects) - if parse_tree.params is None: - kwds['params'] = {} - else: - # The MIME RFCs specify that parameter ordering is arbitrary. - kwds['params'] = dict((utils._sanitize(name).lower(), - utils._sanitize(value)) - for name, value in parse_tree.params) - - def init(self, *args, **kw): - self._params = kw.pop('params') - super().init(*args, **kw) - - @property - def params(self): - return self._params.copy() - - -class ContentTypeHeader(ParameterizedMIMEHeader): - - value_parser = staticmethod(parser.parse_content_type_header) - - def init(self, *args, **kw): - super().init(*args, **kw) - self._maintype = utils._sanitize(self._parse_tree.maintype) - self._subtype = utils._sanitize(self._parse_tree.subtype) - - @property - def maintype(self): - return self._maintype - - @property - def subtype(self): - return self._subtype - - @property - def content_type(self): - return self.maintype + '/' + self.subtype - - -class ContentDispositionHeader(ParameterizedMIMEHeader): - - value_parser = staticmethod(parser.parse_content_disposition_header) - - def init(self, *args, **kw): - super().init(*args, **kw) - cd = self._parse_tree.content_disposition - self._content_disposition = cd if cd is None else utils._sanitize(cd) - - @property - def content_disposition(self): - return self._content_disposition - - -class ContentTransferEncodingHeader(object): - - max_count = 1 - - value_parser = staticmethod(parser.parse_content_transfer_encoding_header) - - @classmethod - def parse(cls, value, kwds): - kwds['parse_tree'] = parse_tree = cls.value_parser(value) - kwds['decoded'] = str(parse_tree) - kwds['defects'].extend(parse_tree.all_defects) - - def init(self, *args, **kw): - super().init(*args, **kw) - self._cte = utils._sanitize(self._parse_tree.cte) - - @property - def cte(self): - return self._cte - - -# The header factory # - -_default_header_map = { - 'subject': UniqueUnstructuredHeader, - 'date': UniqueDateHeader, - 'resent-date': DateHeader, - 'orig-date': UniqueDateHeader, - 'sender': UniqueSingleAddressHeader, - 'resent-sender': SingleAddressHeader, - 'to': UniqueAddressHeader, - 'resent-to': AddressHeader, - 'cc': UniqueAddressHeader, - 'resent-cc': AddressHeader, - 'bcc': UniqueAddressHeader, - 'resent-bcc': AddressHeader, - 'from': UniqueAddressHeader, - 'resent-from': AddressHeader, - 'reply-to': UniqueAddressHeader, - 'mime-version': MIMEVersionHeader, - 'content-type': ContentTypeHeader, - 'content-disposition': ContentDispositionHeader, - 'content-transfer-encoding': ContentTransferEncodingHeader, - } - -class HeaderRegistry(object): - - """A header_factory and header registry.""" - - def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader, - use_default_map=True): - """Create a header_factory that works with the Policy API. - - base_class is the class that will be the last class in the created - header class's __bases__ list. default_class is the class that will be - used if "name" (see __call__) does not appear in the registry. - use_default_map controls whether or not the default mapping of names to - specialized classes is copied in to the registry when the factory is - created. The default is True. 
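HeaderRegistry, whose constructor docstring appears above, manufactures one class per header name at lookup time by combining the registered specialized class with base_class (see __getitem__ below). With the standard-library twin:

from email.headerregistry import HeaderRegistry

registry = HeaderRegistry()
h = registry('To', 'Ford <ford@example.com>, eve@example.com')
type(h).__name__                     # '_UniqueAddressHeader'
[a.addr_spec for a in h.addresses]   # ['ford@example.com', 'eve@example.com']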
- - """ - self.registry = {} - self.base_class = base_class - self.default_class = default_class - if use_default_map: - self.registry.update(_default_header_map) - - def map_to_type(self, name, cls): - """Register cls as the specialized class for handling "name" headers. - - """ - self.registry[name.lower()] = cls - - def __getitem__(self, name): - cls = self.registry.get(name.lower(), self.default_class) - return type(text_to_native_str('_'+cls.__name__), (cls, self.base_class), {}) - - def __call__(self, name, value): - """Create a header instance for header 'name' from 'value'. - - Creates a header instance by creating a specialized class for parsing - and representing the specified header by combining the factory - base_class with a specialized class from the registry or the - default_class, and passing the name and value to the constructed - class's constructor. - - """ - return self[name](name, value) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/iterators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/iterators.py deleted file mode 100755 index 82d320f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/iterators.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Various types of useful iterators and generators.""" -from __future__ import print_function -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -__all__ = [ - 'body_line_iterator', - 'typed_subpart_iterator', - 'walk', - # Do not include _structure() since it's part of the debugging API. - ] - -import sys -from io import StringIO - - -# This function will become a method of the Message class -def walk(self): - """Walk over the message tree, yielding each subpart. - - The walk is performed in depth-first order. This method is a - generator. - """ - yield self - if self.is_multipart(): - for subpart in self.get_payload(): - for subsubpart in subpart.walk(): - yield subsubpart - - -# These two functions are imported into the Iterators.py interface module. -def body_line_iterator(msg, decode=False): - """Iterate over the parts, returning string payloads line-by-line. - - Optional decode (default False) is passed through to .get_payload(). - """ - for subpart in msg.walk(): - payload = subpart.get_payload(decode=decode) - if isinstance(payload, str): - for line in StringIO(payload): - yield line - - -def typed_subpart_iterator(msg, maintype='text', subtype=None): - """Iterate over the subparts with a given MIME type. - - Use `maintype' as the main MIME type to match against; this defaults to - "text". Optional `subtype' is the MIME subtype to match against; if - omitted, only the main type is matched. 
- """ - for subpart in msg.walk(): - if subpart.get_content_maintype() == maintype: - if subtype is None or subpart.get_content_subtype() == subtype: - yield subpart - - -def _structure(msg, fp=None, level=0, include_default=False): - """A handy debugging aid""" - if fp is None: - fp = sys.stdout - tab = ' ' * (level * 4) - print(tab + msg.get_content_type(), end='', file=fp) - if include_default: - print(' [%s]' % msg.get_default_type(), file=fp) - else: - print(file=fp) - if msg.is_multipart(): - for subpart in msg.get_payload(): - _structure(subpart, fp, level+1, include_default) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/message.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/message.py deleted file mode 100755 index d8d9615..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/message.py +++ /dev/null @@ -1,882 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2001-2007 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Basic message object for the email package object model.""" -from __future__ import absolute_import, division, unicode_literals -from future.builtins import list, range, str, zip - -__all__ = ['Message'] - -import re -import uu -import base64 -import binascii -from io import BytesIO, StringIO - -# Intrapackage imports -from future.utils import as_native_str -from future.backports.email import utils -from future.backports.email import errors -from future.backports.email._policybase import compat32 -from future.backports.email import charset as _charset -from future.backports.email._encoded_words import decode_b -Charset = _charset.Charset - -SEMISPACE = '; ' - -# Regular expression that matches `special' characters in parameters, the -# existence of which force quoting of the parameter value. -tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') - - -def _splitparam(param): - # Split header parameters. BAW: this may be too simple. It isn't - # strictly RFC 2045 (section 5.1) compliant, but it catches most headers - # found in the wild. We may eventually need a full fledged parser. - # RDM: we might have a Header here; for now just stringify it. - a, sep, b = str(param).partition(';') - if not sep: - return a.strip(), None - return a.strip(), b.strip() - -def _formatparam(param, value=None, quote=True): - """Convenience function to format and return a key=value pair. - - This will quote the value if needed or if quote is true. If value is a - three tuple (charset, language, value), it will be encoded according - to RFC2231 rules. If it contains non-ascii characters it will likewise - be encoded according to RFC2231 rules, using the utf-8 charset and - a null language. - """ - if value is not None and len(value) > 0: - # A tuple is used for RFC 2231 encoded parameter values where items - # are (charset, language, value). charset is a string, not a Charset - # instance. RFC 2231 encoded values are never quoted, per RFC. - if isinstance(value, tuple): - # Encode as per RFC 2231 - param += '*' - value = utils.encode_rfc2231(value[2], value[0], value[1]) - return '%s=%s' % (param, value) - else: - try: - value.encode('ascii') - except UnicodeEncodeError: - param += '*' - value = utils.encode_rfc2231(value, 'utf-8', '') - return '%s=%s' % (param, value) - # BAW: Please check this. I think that if quote is set it should - # force quoting even if not necessary. 
- if quote or tspecials.search(value): - return '%s="%s"' % (param, utils.quote(value)) - else: - return '%s=%s' % (param, value) - else: - return param - -def _parseparam(s): - # RDM This might be a Header, so for now stringify it. - s = ';' + str(s) - plist = [] - while s[:1] == ';': - s = s[1:] - end = s.find(';') - while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: - end = s.find(';', end + 1) - if end < 0: - end = len(s) - f = s[:end] - if '=' in f: - i = f.index('=') - f = f[:i].strip().lower() + '=' + f[i+1:].strip() - plist.append(f.strip()) - s = s[end:] - return plist - - -def _unquotevalue(value): - # This is different than utils.collapse_rfc2231_value() because it doesn't - # try to convert the value to a unicode. Message.get_param() and - # Message.get_params() are both currently defined to return the tuple in - # the face of RFC 2231 parameters. - if isinstance(value, tuple): - return value[0], value[1], utils.unquote(value[2]) - else: - return utils.unquote(value) - - -class Message(object): - """Basic message object. - - A message object is defined as something that has a bunch of RFC 2822 - headers and a payload. It may optionally have an envelope header - (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a - multipart or a message/rfc822), then the payload is a list of Message - objects, otherwise it is a string. - - Message objects implement part of the `mapping' interface, which assumes - there is exactly one occurrence of the header per message. Some headers - do in fact appear multiple times (e.g. Received) and for those headers, - you must use the explicit API to set or get all the headers. Not all of - the mapping methods are implemented. - """ - def __init__(self, policy=compat32): - self.policy = policy - self._headers = list() - self._unixfrom = None - self._payload = None - self._charset = None - # Defaults for multipart messages - self.preamble = self.epilogue = None - self.defects = [] - # Default content type - self._default_type = 'text/plain' - - @as_native_str(encoding='utf-8') - def __str__(self): - """Return the entire formatted message as a string. - This includes the headers, body, and envelope header. - """ - return self.as_string() - - def as_string(self, unixfrom=False, maxheaderlen=0): - """Return the entire formatted message as a (unicode) string. - Optional `unixfrom' when True, means include the Unix From_ envelope - header. - - This is a convenience method and may not generate the message exactly - as you intend. For more flexibility, use the flatten() method of a - Generator instance. - """ - from future.backports.email.generator import Generator - fp = StringIO() - g = Generator(fp, mangle_from_=False, maxheaderlen=maxheaderlen) - g.flatten(self, unixfrom=unixfrom) - return fp.getvalue() - - def is_multipart(self): - """Return True if the message consists of multiple parts.""" - return isinstance(self._payload, list) - - # - # Unix From_ line - # - def set_unixfrom(self, unixfrom): - self._unixfrom = unixfrom - - def get_unixfrom(self): - return self._unixfrom - - # - # Payload manipulation. - # - def attach(self, payload): - """Add the given payload to the current payload. - - The current payload will always be a list of objects after this method - is called. If you want to set the payload to a scalar object, use - set_payload() instead. 
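attach(), whose docstring this chunk ends in, switches the payload to a list; after the first call the message reports itself as multipart. Using the standard-library email.message.Message that this file backports:

from email.message import Message

outer = Message()
outer['Content-Type'] = 'multipart/mixed; boundary="b"'
part = Message()
part.set_payload('hello')
outer.attach(part)
outer.is_multipart()          # True: the payload is now a list
outer.get_payload(0) is part  # True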
- """ - if self._payload is None: - self._payload = [payload] - else: - self._payload.append(payload) - - def get_payload(self, i=None, decode=False): - """Return a reference to the payload. - - The payload will either be a list object or a string. If you mutate - the list object, you modify the message's payload in place. Optional - i returns that index into the payload. - - Optional decode is a flag indicating whether the payload should be - decoded or not, according to the Content-Transfer-Encoding header - (default is False). - - When True and the message is not a multipart, the payload will be - decoded if this header's value is `quoted-printable' or `base64'. If - some other encoding is used, or the header is missing, or if the - payload has bogus data (i.e. bogus base64 or uuencoded data), the - payload is returned as-is. - - If the message is a multipart and the decode flag is True, then None - is returned. - """ - # Here is the logic table for this code, based on the email5.0.0 code: - # i decode is_multipart result - # ------ ------ ------------ ------------------------------ - # None True True None - # i True True None - # None False True _payload (a list) - # i False True _payload element i (a Message) - # i False False error (not a list) - # i True False error (not a list) - # None False False _payload - # None True False _payload decoded (bytes) - # Note that Barry planned to factor out the 'decode' case, but that - # isn't so easy now that we handle the 8 bit data, which needs to be - # converted in both the decode and non-decode path. - if self.is_multipart(): - if decode: - return None - if i is None: - return self._payload - else: - return self._payload[i] - # For backward compatibility, Use isinstance and this error message - # instead of the more logical is_multipart test. - if i is not None and not isinstance(self._payload, list): - raise TypeError('Expected list, got %s' % type(self._payload)) - payload = self._payload - # cte might be a Header, so for now stringify it. - cte = str(self.get('content-transfer-encoding', '')).lower() - # payload may be bytes here. - if isinstance(payload, str): - payload = str(payload) # for Python-Future, so surrogateescape works - if utils._has_surrogates(payload): - bpayload = payload.encode('ascii', 'surrogateescape') - if not decode: - try: - payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace') - except LookupError: - payload = bpayload.decode('ascii', 'replace') - elif decode: - try: - bpayload = payload.encode('ascii') - except UnicodeError: - # This won't happen for RFC compliant messages (messages - # containing only ASCII codepoints in the unicode input). - # If it does happen, turn the string into bytes in a way - # guaranteed not to fail. - bpayload = payload.encode('raw-unicode-escape') - if not decode: - return payload - if cte == 'quoted-printable': - return utils._qdecode(bpayload) - elif cte == 'base64': - # XXX: this is a bit of a hack; decode_b should probably be factored - # out somewhere, but I haven't figured out where yet. 
- value, defects = decode_b(b''.join(bpayload.splitlines())) - for defect in defects: - self.policy.handle_defect(self, defect) - return value - elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'): - in_file = BytesIO(bpayload) - out_file = BytesIO() - try: - uu.decode(in_file, out_file, quiet=True) - return out_file.getvalue() - except uu.Error: - # Some decoding problem - return bpayload - if isinstance(payload, str): - return bpayload - return payload - - def set_payload(self, payload, charset=None): - """Set the payload to the given value. - - Optional charset sets the message's default character set. See - set_charset() for details. - """ - self._payload = payload - if charset is not None: - self.set_charset(charset) - - def set_charset(self, charset): - """Set the charset of the payload to a given character set. - - charset can be a Charset instance, a string naming a character set, or - None. If it is a string it will be converted to a Charset instance. - If charset is None, the charset parameter will be removed from the - Content-Type field. Anything else will generate a TypeError. - - The message will be assumed to be of type text/* encoded with - charset.input_charset. It will be converted to charset.output_charset - and encoded properly, if needed, when generating the plain text - representation of the message. MIME headers (MIME-Version, - Content-Type, Content-Transfer-Encoding) will be added as needed. - """ - if charset is None: - self.del_param('charset') - self._charset = None - return - if not isinstance(charset, Charset): - charset = Charset(charset) - self._charset = charset - if 'MIME-Version' not in self: - self.add_header('MIME-Version', '1.0') - if 'Content-Type' not in self: - self.add_header('Content-Type', 'text/plain', - charset=charset.get_output_charset()) - else: - self.set_param('charset', charset.get_output_charset()) - if charset != charset.get_output_charset(): - self._payload = charset.body_encode(self._payload) - if 'Content-Transfer-Encoding' not in self: - cte = charset.get_body_encoding() - try: - cte(self) - except TypeError: - self._payload = charset.body_encode(self._payload) - self.add_header('Content-Transfer-Encoding', cte) - - def get_charset(self): - """Return the Charset instance associated with the message's payload. - """ - return self._charset - - # - # MAPPING INTERFACE (partial) - # - def __len__(self): - """Return the total number of headers, including duplicates.""" - return len(self._headers) - - def __getitem__(self, name): - """Get a header value. - - Return None if the header is missing instead of raising an exception. - - Note that if the header appeared multiple times, exactly which - occurrence gets returned is undefined. Use get_all() to get all - the values matching a header field name. - """ - return self.get(name) - - def __setitem__(self, name, val): - """Set the value of a header. - - Note: this does not overwrite an existing header with the same field - name. Use __delitem__() first to delete any existing headers. - """ - max_count = self.policy.header_max_count(name) - if max_count: - lname = name.lower() - found = 0 - for k, v in self._headers: - if k.lower() == lname: - found += 1 - if found >= max_count: - raise ValueError("There may be at most {} {} headers " - "in a message".format(max_count, name)) - self._headers.append(self.policy.header_store_parse(name, val)) - - def __delitem__(self, name): - """Delete all occurrences of a header, if present. - - Does not raise an exception if the header is missing. 
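The decode branch of get_payload() that finishes above is most often used to undo a base64 or quoted-printable Content-Transfer-Encoding; the header value alone decides which path runs:

from email import message_from_string

msg = message_from_string(
    'Content-Type: text/plain; charset="utf-8"\n'
    'Content-Transfer-Encoding: base64\n'
    '\n'
    'Q2Fmw6k=\n'
)
msg.get_payload()             # 'Q2Fmw6k=\n'  (raw, still transfer-encoded)
msg.get_payload(decode=True)  # b'Caf\xc3\xa9' ('Café' in UTF-8)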
- """ - name = name.lower() - newheaders = list() - for k, v in self._headers: - if k.lower() != name: - newheaders.append((k, v)) - self._headers = newheaders - - def __contains__(self, name): - return name.lower() in [k.lower() for k, v in self._headers] - - def __iter__(self): - for field, value in self._headers: - yield field - - def keys(self): - """Return a list of all the message's header field names. - - These will be sorted in the order they appeared in the original - message, or were added to the message, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [k for k, v in self._headers] - - def values(self): - """Return a list of all the message's header values. - - These will be sorted in the order they appeared in the original - message, or were added to the message, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [self.policy.header_fetch_parse(k, v) - for k, v in self._headers] - - def items(self): - """Get all the message's header fields and values. - - These will be sorted in the order they appeared in the original - message, or were added to the message, and may contain duplicates. - Any fields deleted and re-inserted are always appended to the header - list. - """ - return [(k, self.policy.header_fetch_parse(k, v)) - for k, v in self._headers] - - def get(self, name, failobj=None): - """Get a header value. - - Like __getitem__() but return failobj instead of None when the field - is missing. - """ - name = name.lower() - for k, v in self._headers: - if k.lower() == name: - return self.policy.header_fetch_parse(k, v) - return failobj - - # - # "Internal" methods (public API, but only intended for use by a parser - # or generator, not normal application code. - # - - def set_raw(self, name, value): - """Store name and value in the model without modification. - - This is an "internal" API, intended only for use by a parser. - """ - self._headers.append((name, value)) - - def raw_items(self): - """Return the (name, value) header pairs without modification. - - This is an "internal" API, intended only for use by a generator. - """ - return iter(self._headers.copy()) - - # - # Additional useful stuff - # - - def get_all(self, name, failobj=None): - """Return a list of all the values for the named field. - - These will be sorted in the order they appeared in the original - message, and may contain duplicates. Any fields deleted and - re-inserted are always appended to the header list. - - If no such fields exist, failobj is returned (defaults to None). - """ - values = [] - name = name.lower() - for k, v in self._headers: - if k.lower() == name: - values.append(self.policy.header_fetch_parse(k, v)) - if not values: - return failobj - return values - - def add_header(self, _name, _value, **_params): - """Extended header setting. - - name is the header field to add. keyword arguments can be used to set - additional parameters for the header field, with underscores converted - to dashes. Normally the parameter will be added as key="value" unless - value is None, in which case only the key will be added. If a - parameter value contains non-ASCII characters it can be specified as a - three-tuple of (charset, language, value), in which case it will be - encoded according to RFC2231 rules. Otherwise it will be encoded using - the utf-8 charset and a language of ''. 
-
-        Examples:
-
-        msg.add_header('content-disposition', 'attachment', filename='bud.gif')
-        msg.add_header('content-disposition', 'attachment',
-                       filename=('utf-8', '', 'Fußballer.ppt'))
-        msg.add_header('content-disposition', 'attachment',
-                       filename='Fußballer.ppt')
-        """
-        parts = []
-        for k, v in _params.items():
-            if v is None:
-                parts.append(k.replace('_', '-'))
-            else:
-                parts.append(_formatparam(k.replace('_', '-'), v))
-        if _value is not None:
-            parts.insert(0, _value)
-        self[_name] = SEMISPACE.join(parts)
-
-    def replace_header(self, _name, _value):
-        """Replace a header.
-
-        Replace the first matching header found in the message, retaining
-        header order and case.  If no matching header was found, a KeyError is
-        raised.
-        """
-        _name = _name.lower()
-        for i, (k, v) in zip(range(len(self._headers)), self._headers):
-            if k.lower() == _name:
-                self._headers[i] = self.policy.header_store_parse(k, _value)
-                break
-        else:
-            raise KeyError(_name)
-
-    #
-    # Use these three methods instead of the three above.
-    #
-
-    def get_content_type(self):
-        """Return the message's content type.
-
-        The returned string is coerced to lower case of the form
-        `maintype/subtype'.  If there was no Content-Type header in the
-        message, the default type as given by get_default_type() will be
-        returned.  Since according to RFC 2045, messages always have a default
-        type this will always return a value.
-
-        RFC 2045 defines a message's default type to be text/plain unless it
-        appears inside a multipart/digest container, in which case it would be
-        message/rfc822.
-        """
-        missing = object()
-        value = self.get('content-type', missing)
-        if value is missing:
-            # This should have no parameters
-            return self.get_default_type()
-        ctype = _splitparam(value)[0].lower()
-        # RFC 2045, section 5.2 says if it's invalid, use text/plain
-        if ctype.count('/') != 1:
-            return 'text/plain'
-        return ctype
-
-    def get_content_maintype(self):
-        """Return the message's main content type.
-
-        This is the `maintype' part of the string returned by
-        get_content_type().
-        """
-        ctype = self.get_content_type()
-        return ctype.split('/')[0]
-
-    def get_content_subtype(self):
-        """Returns the message's sub-content type.
-
-        This is the `subtype' part of the string returned by
-        get_content_type().
-        """
-        ctype = self.get_content_type()
-        return ctype.split('/')[1]
-
-    def get_default_type(self):
-        """Return the `default' content type.
-
-        Most messages have a default content type of text/plain, except for
-        messages that are subparts of multipart/digest containers.  Such
-        subparts have a default content type of message/rfc822.
-        """
-        return self._default_type
-
-    def set_default_type(self, ctype):
-        """Set the `default' content type.
-
-        ctype should be either "text/plain" or "message/rfc822", although this
-        is not enforced.  The default content type is not stored in the
-        Content-Type header.
-        """
-        self._default_type = ctype
-
-    def _get_params_preserve(self, failobj, header):
-        # Like get_params() but preserves the quoting of values.  BAW:
-        # should this be part of the public interface?
-        missing = object()
-        value = self.get(header, missing)
-        if value is missing:
-            return failobj
-        params = []
-        for p in _parseparam(value):
-            try:
-                name, val = p.split('=', 1)
-                name = name.strip()
-                val = val.strip()
-            except ValueError:
-                # Must have been a bare attribute
-                name = p.strip()
-                val = ''
-            params.append((name, val))
-        params = utils.decode_params(params)
-        return params
-
-    def get_params(self, failobj=None, header='content-type', unquote=True):
-        """Return the message's Content-Type parameters, as a list.
-
-        The elements of the returned list are 2-tuples of key/value pairs, as
-        split on the `=' sign.  The left hand side of the `=' is the key,
-        while the right hand side is the value.  If there is no `=' sign in
-        the parameter the value is the empty string.  The value is as
-        described in the get_param() method.
-
-        Optional failobj is the object to return if there is no Content-Type
-        header.  Optional header is the header to search instead of
-        Content-Type.  If unquote is True, the value is unquoted.
-        """
-        missing = object()
-        params = self._get_params_preserve(missing, header)
-        if params is missing:
-            return failobj
-        if unquote:
-            return [(k, _unquotevalue(v)) for k, v in params]
-        else:
-            return params
-
-    def get_param(self, param, failobj=None, header='content-type',
-                  unquote=True):
-        """Return the parameter value if found in the Content-Type header.
-
-        Optional failobj is the object to return if there is no Content-Type
-        header, or the Content-Type header has no such parameter.  Optional
-        header is the header to search instead of Content-Type.
-
-        Parameter keys are always compared case insensitively.  The return
-        value can either be a string, or a 3-tuple if the parameter was RFC
-        2231 encoded.  When it's a 3-tuple, the elements of the value are of
-        the form (CHARSET, LANGUAGE, VALUE).  Note that both CHARSET and
-        LANGUAGE can be None, in which case you should consider VALUE to be
-        encoded in the us-ascii charset.  You can usually ignore LANGUAGE.
-        The parameter value (either the returned string, or the VALUE item in
-        the 3-tuple) is always unquoted, unless unquote is set to False.
-
-        If your application doesn't care whether the parameter was RFC 2231
-        encoded, it can turn the return value into a string as follows:
-
-            rawparam = msg.get_param('foo')
-            param = email.utils.collapse_rfc2231_value(rawparam)
-
-        """
-        if header not in self:
-            return failobj
-        for k, v in self._get_params_preserve(failobj, header):
-            if k.lower() == param.lower():
-                if unquote:
-                    return _unquotevalue(v)
-                else:
-                    return v
-        return failobj
-
-    def set_param(self, param, value, header='Content-Type', requote=True,
-                  charset=None, language=''):
-        """Set a parameter in the Content-Type header.
-
-        If the parameter already exists in the header, its value will be
-        replaced with the new value.
-
-        If header is Content-Type and has not yet been defined for this
-        message, it will be set to "text/plain" and the new parameter and
-        value will be appended as per RFC 2045.
-
-        An alternate header can be specified in the header argument, and all
-        parameters will be quoted as necessary unless requote is False.
-
-        If charset is specified, the parameter will be encoded according to RFC
-        2231.  Optional language specifies the RFC 2231 language, defaulting
-        to the empty string.  Both charset and language should be strings.
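The 3-tuple return described in get_param() above is the part that usually surprises callers; email.utils.collapse_rfc2231_value() flattens it back to a plain string. A short sketch with the stdlib equivalents (the filename is the docstring's own example):

    from email.message import Message
    from email.utils import collapse_rfc2231_value

    msg = Message()
    msg.add_header('Content-Disposition', 'attachment',
                   filename=('utf-8', '', 'Fußballer.ppt'))
    rawparam = msg.get_param('filename', header='content-disposition')
    print(type(rawparam))                     # <class 'tuple'>: (charset, language, value)
    print(collapse_rfc2231_value(rawparam))   # Fußballer.ppt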
- """ - if not isinstance(value, tuple) and charset: - value = (charset, language, value) - - if header not in self and header.lower() == 'content-type': - ctype = 'text/plain' - else: - ctype = self.get(header) - if not self.get_param(param, header=header): - if not ctype: - ctype = _formatparam(param, value, requote) - else: - ctype = SEMISPACE.join( - [ctype, _formatparam(param, value, requote)]) - else: - ctype = '' - for old_param, old_value in self.get_params(header=header, - unquote=requote): - append_param = '' - if old_param.lower() == param.lower(): - append_param = _formatparam(param, value, requote) - else: - append_param = _formatparam(old_param, old_value, requote) - if not ctype: - ctype = append_param - else: - ctype = SEMISPACE.join([ctype, append_param]) - if ctype != self.get(header): - del self[header] - self[header] = ctype - - def del_param(self, param, header='content-type', requote=True): - """Remove the given parameter completely from the Content-Type header. - - The header will be re-written in place without the parameter or its - value. All values will be quoted as necessary unless requote is - False. Optional header specifies an alternative to the Content-Type - header. - """ - if header not in self: - return - new_ctype = '' - for p, v in self.get_params(header=header, unquote=requote): - if p.lower() != param.lower(): - if not new_ctype: - new_ctype = _formatparam(p, v, requote) - else: - new_ctype = SEMISPACE.join([new_ctype, - _formatparam(p, v, requote)]) - if new_ctype != self.get(header): - del self[header] - self[header] = new_ctype - - def set_type(self, type, header='Content-Type', requote=True): - """Set the main type and subtype for the Content-Type header. - - type must be a string in the form "maintype/subtype", otherwise a - ValueError is raised. - - This method replaces the Content-Type header, keeping all the - parameters in place. If requote is False, this leaves the existing - header's quoting as is. Otherwise, the parameters will be quoted (the - default). - - An alternative header can be specified in the header argument. When - the Content-Type header is set, we'll always also add a MIME-Version - header. - """ - # BAW: should we be strict? - if not type.count('/') == 1: - raise ValueError - # Set the Content-Type, you get a MIME-Version - if header.lower() == 'content-type': - del self['mime-version'] - self['MIME-Version'] = '1.0' - if header not in self: - self[header] = type - return - params = self.get_params(header=header, unquote=requote) - del self[header] - self[header] = type - # Skip the first param; it's the old type. - for p, v in params[1:]: - self.set_param(p, v, header, requote) - - def get_filename(self, failobj=None): - """Return the filename associated with the payload if present. - - The filename is extracted from the Content-Disposition header's - `filename' parameter, and it is unquoted. If that header is missing - the `filename' parameter, this method falls back to looking for the - `name' parameter. - """ - missing = object() - filename = self.get_param('filename', missing, 'content-disposition') - if filename is missing: - filename = self.get_param('name', missing, 'content-type') - if filename is missing: - return failobj - return utils.collapse_rfc2231_value(filename).strip() - - def get_boundary(self, failobj=None): - """Return the boundary associated with the payload if present. - - The boundary is extracted from the Content-Type header's `boundary' - parameter, and it is unquoted. 
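A round trip through set_param()/del_param() shows the in-place Content-Type rewriting these methods document; the stdlib Message behaves identically to the backport here:

    from email.message import Message

    msg = Message()
    msg.set_type('text/html')      # also adds a MIME-Version: 1.0 header
    msg.set_param('charset', 'utf-8')
    print(msg['Content-Type'])     # text/html; charset="utf-8"
    msg.del_param('charset')
    print(msg['Content-Type'])     # text/html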
- """ - missing = object() - boundary = self.get_param('boundary', missing) - if boundary is missing: - return failobj - # RFC 2046 says that boundaries may begin but not end in w/s - return utils.collapse_rfc2231_value(boundary).rstrip() - - def set_boundary(self, boundary): - """Set the boundary parameter in Content-Type to 'boundary'. - - This is subtly different than deleting the Content-Type header and - adding a new one with a new boundary parameter via add_header(). The - main difference is that using the set_boundary() method preserves the - order of the Content-Type header in the original message. - - HeaderParseError is raised if the message has no Content-Type header. - """ - missing = object() - params = self._get_params_preserve(missing, 'content-type') - if params is missing: - # There was no Content-Type header, and we don't know what type - # to set it to, so raise an exception. - raise errors.HeaderParseError('No Content-Type header found') - newparams = list() - foundp = False - for pk, pv in params: - if pk.lower() == 'boundary': - newparams.append(('boundary', '"%s"' % boundary)) - foundp = True - else: - newparams.append((pk, pv)) - if not foundp: - # The original Content-Type header had no boundary attribute. - # Tack one on the end. BAW: should we raise an exception - # instead??? - newparams.append(('boundary', '"%s"' % boundary)) - # Replace the existing Content-Type header with the new value - newheaders = list() - for h, v in self._headers: - if h.lower() == 'content-type': - parts = list() - for k, v in newparams: - if v == '': - parts.append(k) - else: - parts.append('%s=%s' % (k, v)) - val = SEMISPACE.join(parts) - newheaders.append(self.policy.header_store_parse(h, val)) - - else: - newheaders.append((h, v)) - self._headers = newheaders - - def get_content_charset(self, failobj=None): - """Return the charset parameter of the Content-Type header. - - The returned string is always coerced to lower case. If there is no - Content-Type header, or if that header has no charset parameter, - failobj is returned. - """ - missing = object() - charset = self.get_param('charset', missing) - if charset is missing: - return failobj - if isinstance(charset, tuple): - # RFC 2231 encoded, so decode it, and it better end up as ascii. - pcharset = charset[0] or 'us-ascii' - try: - # LookupError will be raised if the charset isn't known to - # Python. UnicodeError will be raised if the encoded text - # contains a character not in the charset. - as_bytes = charset[2].encode('raw-unicode-escape') - charset = str(as_bytes, pcharset) - except (LookupError, UnicodeError): - charset = charset[2] - # charset characters must be in us-ascii range - try: - charset.encode('us-ascii') - except UnicodeError: - return failobj - # RFC 2046, $4.1.2 says charsets are not case sensitive - return charset.lower() - - def get_charsets(self, failobj=None): - """Return a list containing the charset(s) used in this message. - - The returned list of items describes the Content-Type headers' - charset parameter for this message and all the subparts in its - payload. - - Each item will either be a string (the value of the charset parameter - in the Content-Type header of that part) or the value of the - 'failobj' parameter (defaults to None), if the part does not have a - main MIME type of "text", or the charset is not defined. - - The list will contain one string for each part of the message, plus - one for the container message (i.e. 
self), so that a non-multipart - message will still return a list of length 1. - """ - return [part.get_content_charset(failobj) for part in self.walk()] - - # I.e. def walk(self): ... - from future.backports.email.iterators import walk diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/application.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/application.py deleted file mode 100755 index 5cbfb17..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/application.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Keith Dart -# Contact: email-sig@python.org - -"""Class representing application/* type MIME documents.""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -from future.backports.email import encoders -from future.backports.email.mime.nonmultipart import MIMENonMultipart - -__all__ = ["MIMEApplication"] - - -class MIMEApplication(MIMENonMultipart): - """Class for generating application/* MIME documents.""" - - def __init__(self, _data, _subtype='octet-stream', - _encoder=encoders.encode_base64, **_params): - """Create an application/* type MIME document. - - _data is a string containing the raw application data. - - _subtype is the MIME content type subtype, defaulting to - 'octet-stream'. - - _encoder is a function which will perform the actual encoding for - transport of the application data, defaulting to base64 encoding. - - Any additional keyword arguments are passed to the base class - constructor, which turns them into parameters on the Content-Type - header. - """ - if _subtype is None: - raise TypeError('Invalid application MIME subtype') - MIMENonMultipart.__init__(self, 'application', _subtype, **_params) - self.set_payload(_data) - _encoder(self) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/audio.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/audio.py deleted file mode 100755 index 4989c11..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/audio.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2001-2007 Python Software Foundation -# Author: Anthony Baxter -# Contact: email-sig@python.org - -"""Class representing audio/* type MIME documents.""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -__all__ = ['MIMEAudio'] - -import sndhdr - -from io import BytesIO -from future.backports.email import encoders -from future.backports.email.mime.nonmultipart import MIMENonMultipart - - -_sndhdr_MIMEmap = {'au' : 'basic', - 'wav' :'x-wav', - 'aiff':'x-aiff', - 'aifc':'x-aiff', - } - -# There are others in sndhdr that don't have MIME types. :( -# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma?? -def _whatsnd(data): - """Try to identify a sound file type. - - sndhdr.what() has a pretty cruddy interface, unfortunately. This is why - we re-do it here. It would be easier to reverse engineer the Unix 'file' - command and use the standard 'magic' file, as shipped with a modern Unix. 
- """ - hdr = data[:512] - fakefile = BytesIO(hdr) - for testfn in sndhdr.tests: - res = testfn(hdr, fakefile) - if res is not None: - return _sndhdr_MIMEmap.get(res[0]) - return None - - -class MIMEAudio(MIMENonMultipart): - """Class for generating audio/* MIME documents.""" - - def __init__(self, _audiodata, _subtype=None, - _encoder=encoders.encode_base64, **_params): - """Create an audio/* type MIME document. - - _audiodata is a string containing the raw audio data. If this data - can be decoded by the standard Python `sndhdr' module, then the - subtype will be automatically included in the Content-Type header. - Otherwise, you can specify the specific audio subtype via the - _subtype parameter. If _subtype is not given, and no subtype can be - guessed, a TypeError is raised. - - _encoder is a function which will perform the actual encoding for - transport of the image data. It takes one argument, which is this - Image instance. It should use get_payload() and set_payload() to - change the payload to the encoded form. It should also add any - Content-Transfer-Encoding or other headers to the message as - necessary. The default encoding is Base64. - - Any additional keyword arguments are passed to the base class - constructor, which turns them into parameters on the Content-Type - header. - """ - if _subtype is None: - _subtype = _whatsnd(_audiodata) - if _subtype is None: - raise TypeError('Could not find audio MIME subtype') - MIMENonMultipart.__init__(self, 'audio', _subtype, **_params) - self.set_payload(_audiodata) - _encoder(self) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/base.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/base.py deleted file mode 100755 index e77f3ca..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/base.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Base class for MIME specializations.""" -from __future__ import absolute_import, division, unicode_literals -from future.backports.email import message - -__all__ = ['MIMEBase'] - - -class MIMEBase(message.Message): - """Base class for MIME specializations.""" - - def __init__(self, _maintype, _subtype, **_params): - """This constructor adds a Content-Type: and a MIME-Version: header. - - The Content-Type: header is taken from the _maintype and _subtype - arguments. Additional parameters for this header are taken from the - keyword arguments. 
- """ - message.Message.__init__(self) - ctype = '%s/%s' % (_maintype, _subtype) - self.add_header('Content-Type', ctype, **_params) - self['MIME-Version'] = '1.0' diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/image.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/image.py deleted file mode 100755 index a036024..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/image.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Class representing image/* type MIME documents.""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -__all__ = ['MIMEImage'] - -import imghdr - -from future.backports.email import encoders -from future.backports.email.mime.nonmultipart import MIMENonMultipart - - -class MIMEImage(MIMENonMultipart): - """Class for generating image/* type MIME documents.""" - - def __init__(self, _imagedata, _subtype=None, - _encoder=encoders.encode_base64, **_params): - """Create an image/* type MIME document. - - _imagedata is a string containing the raw image data. If this data - can be decoded by the standard Python `imghdr' module, then the - subtype will be automatically included in the Content-Type header. - Otherwise, you can specify the specific image subtype via the _subtype - parameter. - - _encoder is a function which will perform the actual encoding for - transport of the image data. It takes one argument, which is this - Image instance. It should use get_payload() and set_payload() to - change the payload to the encoded form. It should also add any - Content-Transfer-Encoding or other headers to the message as - necessary. The default encoding is Base64. - - Any additional keyword arguments are passed to the base class - constructor, which turns them into parameters on the Content-Type - header. - """ - if _subtype is None: - _subtype = imghdr.what(None, _imagedata) - if _subtype is None: - raise TypeError('Could not guess image MIME subtype') - MIMENonMultipart.__init__(self, 'image', _subtype, **_params) - self.set_payload(_imagedata) - _encoder(self) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/message.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/message.py deleted file mode 100755 index 7f92075..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/message.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Class representing message/* MIME documents.""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -__all__ = ['MIMEMessage'] - -from future.backports.email import message -from future.backports.email.mime.nonmultipart import MIMENonMultipart - - -class MIMEMessage(MIMENonMultipart): - """Class representing message/* MIME documents.""" - - def __init__(self, _msg, _subtype='rfc822'): - """Create a message/* type MIME document. - - _msg is a message object and must be an instance of Message, or a - derived class of Message, otherwise a TypeError is raised. - - Optional _subtype defines the subtype of the contained message. 
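All of these constructors funnel through MIMEBase, which sets the Content-Type and MIME-Version headers; the _encoder then adds Content-Transfer-Encoding. Passing _subtype explicitly also sidesteps the imghdr/sndhdr guessing (both modules are deprecated in recent Python). A sketch with the stdlib class:

    from email.mime.image import MIMEImage

    data = b'\x89PNG\r\n\x1a\n'              # placeholder: a PNG signature only
    img = MIMEImage(data, _subtype='png')    # explicit subtype, no guessing
    print(img['Content-Type'])               # image/png
    print(img['MIME-Version'])               # 1.0
    print(img['Content-Transfer-Encoding'])  # base64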
The - default is "rfc822" (this is defined by the MIME standard, even though - the term "rfc822" is technically outdated by RFC 2822). - """ - MIMENonMultipart.__init__(self, 'message', _subtype) - if not isinstance(_msg, message.Message): - raise TypeError('Argument is not an instance of Message') - # It's convenient to use this base class method. We need to do it - # this way or we'll get an exception - message.Message.attach(self, _msg) - # And be sure our default type is set correctly - self.set_default_type('message/rfc822') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/multipart.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/multipart.py deleted file mode 100755 index 6d7ed3d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/multipart.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2002-2006 Python Software Foundation -# Author: Barry Warsaw -# Contact: email-sig@python.org - -"""Base class for MIME multipart/* type messages.""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import - -__all__ = ['MIMEMultipart'] - -from future.backports.email.mime.base import MIMEBase - - -class MIMEMultipart(MIMEBase): - """Base class for MIME multipart/* type messages.""" - - def __init__(self, _subtype='mixed', boundary=None, _subparts=None, - **_params): - """Creates a multipart/* type message. - - By default, creates a multipart/mixed message, with proper - Content-Type and MIME-Version headers. - - _subtype is the subtype of the multipart content type, defaulting to - `mixed'. - - boundary is the multipart boundary string. By default it is - calculated as needed. - - _subparts is a sequence of initial subparts for the payload. It - must be an iterable object, such as a list. You can always - attach new subparts to the message by using the attach() method. - - Additional parameters for the Content-Type header are taken from the - keyword arguments (or passed into the _params argument). - """ - MIMEBase.__init__(self, 'multipart', _subtype, **_params) - - # Initialise _payload to an empty list as the Message superclass's - # implementation of is_multipart assumes that _payload is a list for - # multipart messages. 
-        self._payload = []
-
-        if _subparts:
-            for p in _subparts:
-                self.attach(p)
-        if boundary:
-            self.set_boundary(boundary)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/nonmultipart.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/nonmultipart.py
deleted file mode 100755
index 08c37c3..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/nonmultipart.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Base class for MIME type messages that are not multipart."""
-from __future__ import unicode_literals
-from __future__ import division
-from __future__ import absolute_import
-
-__all__ = ['MIMENonMultipart']
-
-from future.backports.email import errors
-from future.backports.email.mime.base import MIMEBase
-
-
-class MIMENonMultipart(MIMEBase):
-    """Base class for MIME non-multipart type messages."""
-
-    def attach(self, payload):
-        # The public API prohibits attaching multiple subparts to MIMEBase
-        # derived subtypes since none of them are, by definition, of content
-        # type multipart/*
-        raise errors.MultipartConversionError(
-            'Cannot attach additional subparts to non-multipart/*')
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/text.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/text.py
deleted file mode 100755
index 6269f4a..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/mime/text.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Class representing text/* type MIME documents."""
-from __future__ import unicode_literals
-from __future__ import division
-from __future__ import absolute_import
-
-__all__ = ['MIMEText']
-
-from future.backports.email.encoders import encode_7or8bit
-from future.backports.email.mime.nonmultipart import MIMENonMultipart
-
-
-class MIMEText(MIMENonMultipart):
-    """Class for generating text/* type MIME documents."""
-
-    def __init__(self, _text, _subtype='plain', _charset=None):
-        """Create a text/* type MIME document.
-
-        _text is the string for this message object.
-
-        _subtype is the MIME sub content type, defaulting to "plain".
-
-        _charset is the character set parameter added to the Content-Type
-        header.  This defaults to "us-ascii".  Note that as a side-effect, the
-        Content-Transfer-Encoding header will also be set.
-        """
-
-        # If no _charset was specified, check to see if there are non-ascii
-        # characters present.  If not, use 'us-ascii', otherwise use utf-8.
-        # XXX: This can be removed once #7304 is fixed.
-        if _charset is None:
-            try:
-                _text.encode('us-ascii')
-                _charset = 'us-ascii'
-            except UnicodeEncodeError:
-                _charset = 'utf-8'
-
-        MIMENonMultipart.__init__(self, 'text', _subtype,
-                                  **{'charset': _charset})
-
-        self.set_payload(_text, _charset)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/parser.py
deleted file mode 100755
index df1c6e2..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/parser.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright (C) 2001-2007 Python Software Foundation
-# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
-# Contact: email-sig@python.org
-
-"""A parser of RFC 2822 and MIME email messages."""
-from __future__ import unicode_literals
-from __future__ import division
-from __future__ import absolute_import
-
-__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser']
-
-import warnings
-from io import StringIO, TextIOWrapper
-
-from future.backports.email.feedparser import FeedParser, BytesFeedParser
-from future.backports.email.message import Message
-from future.backports.email._policybase import compat32
-
-
-class Parser(object):
-    def __init__(self, _class=Message, **_3to2kwargs):
-        """Parser of RFC 2822 and MIME email messages.
-
-        Creates an in-memory object tree representing the email message, which
-        can then be manipulated and turned over to a Generator to return the
-        textual representation of the message.
-
-        The string must be formatted as a block of RFC 2822 headers and header
-        continuation lines, optionally preceded by a `Unix-from' header.  The
-        header block is terminated either by the end of the string or by a
-        blank line.
-
-        _class is the class to instantiate for new message objects when they
-        must be created.  This class must have a constructor that can take
-        zero arguments.  Default is Message.Message.
-
-        The policy keyword specifies a policy object that controls a number of
-        aspects of the parser's operation.  The default policy maintains
-        backward compatibility.
-
-        """
-        if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
-        else: policy = compat32
-        self._class = _class
-        self.policy = policy
-
-    def parse(self, fp, headersonly=False):
-        """Create a message structure from the data in a file.
-
-        Reads all the data from the file and returns the root of the message
-        structure.  Optional headersonly is a flag specifying whether to stop
-        parsing after reading the headers or not.  The default is False,
-        meaning it parses the entire contents of the file.
-        """
-        feedparser = FeedParser(self._class, policy=self.policy)
-        if headersonly:
-            feedparser._set_headersonly()
-        while True:
-            data = fp.read(8192)
-            if not data:
-                break
-            feedparser.feed(data)
-        return feedparser.close()
-
-    def parsestr(self, text, headersonly=False):
-        """Create a message structure from a string.
-
-        Returns the root of the message structure.  Optional headersonly is a
-        flag specifying whether to stop parsing after reading the headers or
-        not.  The default is False, meaning it parses the entire contents of
-        the file.
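Taken together, the MIME classes deleted above compose in the obvious way: the MIMEText charset probe just shown picks us-ascii or utf-8 automatically, and attaching to a non-multipart part raises MultipartConversionError. A sketch using the stdlib classes before moving on to the parser:

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.errors import MultipartConversionError

    outer = MIMEMultipart('alternative')
    outer.attach(MIMEText('plain body'))            # ASCII text -> us-ascii
    outer.attach(MIMEText('<p>héllo</p>', 'html'))  # non-ASCII text -> utf-8
    print(outer.get_content_type())                 # multipart/alternative
    print([p.get_content_charset() for p in outer.get_payload()])
    # ['us-ascii', 'utf-8']

    try:
        outer.get_payload()[0].attach(MIMEText('x'))
    except MultipartConversionError:
        print('non-multipart parts refuse attach()')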
-        """
-        return self.parse(StringIO(text), headersonly=headersonly)
-
-
-
-class HeaderParser(Parser):
-    def parse(self, fp, headersonly=True):
-        return Parser.parse(self, fp, True)
-
-    def parsestr(self, text, headersonly=True):
-        return Parser.parsestr(self, text, True)
-
-
-class BytesParser(object):
-
-    def __init__(self, *args, **kw):
-        """Parser of binary RFC 2822 and MIME email messages.
-
-        Creates an in-memory object tree representing the email message, which
-        can then be manipulated and turned over to a Generator to return the
-        textual representation of the message.
-
-        The input must be formatted as a block of RFC 2822 headers and header
-        continuation lines, optionally preceded by a `Unix-from' header.  The
-        header block is terminated either by the end of the input or by a
-        blank line.
-
-        _class is the class to instantiate for new message objects when they
-        must be created.  This class must have a constructor that can take
-        zero arguments.  Default is Message.Message.
-        """
-        self.parser = Parser(*args, **kw)
-
-    def parse(self, fp, headersonly=False):
-        """Create a message structure from the data in a binary file.
-
-        Reads all the data from the file and returns the root of the message
-        structure.  Optional headersonly is a flag specifying whether to stop
-        parsing after reading the headers or not.  The default is False,
-        meaning it parses the entire contents of the file.
-        """
-        fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
-        with fp:
-            return self.parser.parse(fp, headersonly)
-
-
-    def parsebytes(self, text, headersonly=False):
-        """Create a message structure from a byte string.
-
-        Returns the root of the message structure.  Optional headersonly is a
-        flag specifying whether to stop parsing after reading the headers or
-        not.  The default is False, meaning it parses the entire contents of
-        the file.
-        """
-        text = text.decode('ASCII', errors='surrogateescape')
-        return self.parser.parsestr(text, headersonly)
-
-
-class BytesHeaderParser(BytesParser):
-    def parse(self, fp, headersonly=True):
-        return BytesParser.parse(self, fp, headersonly=True)
-
-    def parsebytes(self, text, headersonly=True):
-        return BytesParser.parsebytes(self, text, headersonly=True)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/policy.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/policy.py
deleted file mode 100755
index 2f609a2..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/policy.py
+++ /dev/null
@@ -1,193 +0,0 @@
-"""This will be the home for the policy that hooks in the new
-code that adds all the email6 features.
-"""
-from __future__ import unicode_literals
-from __future__ import division
-from __future__ import absolute_import
-from future.builtins import super
-
-from future.standard_library.email._policybase import (Policy, Compat32,
-                                                       compat32, _extend_docstrings)
-from future.standard_library.email.utils import _has_surrogates
-from future.standard_library.email.headerregistry import HeaderRegistry as HeaderRegistry
-
-__all__ = [
-    'Compat32',
-    'compat32',
-    'Policy',
-    'EmailPolicy',
-    'default',
-    'strict',
-    'SMTP',
-    'HTTP',
-    ]
-
-@_extend_docstrings
-class EmailPolicy(Policy):
-
-    """+
-    PROVISIONAL
-
-    The API extensions enabled by this policy are currently provisional.
-    Refer to the documentation for details.
-
-    This policy adds new header parsing and folding algorithms.
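The parser classes removed above map one-to-one onto the stdlib email.parser module (message_from_string() and friends are thin wrappers around them). A minimal usage sketch:

    from email.parser import BytesParser, HeaderParser, Parser

    raw = 'From: a@example.com\nSubject: hi\n\nbody\n'
    msg = Parser().parsestr(raw)
    print(msg['Subject'], repr(msg.get_payload()))   # hi 'body\n'

    # HeaderParser stops at the blank line; the body stays an unparsed string.
    hdrs = HeaderParser().parsestr(raw)
    print(repr(hdrs.get_payload()))                  # 'body\n'

    # BytesParser round-trips arbitrary bytes via ascii + surrogateescape.
    bmsg = BytesParser().parsebytes(raw.encode('ascii'))
    print(bmsg['From'])                              # a@example.com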
Instead of - simple strings, headers are custom objects with custom attributes - depending on the type of the field. The folding algorithm fully - implements RFCs 2047 and 5322. - - In addition to the settable attributes listed above that apply to - all Policies, this policy adds the following additional attributes: - - refold_source -- if the value for a header in the Message object - came from the parsing of some source, this attribute - indicates whether or not a generator should refold - that value when transforming the message back into - stream form. The possible values are: - - none -- all source values use original folding - long -- source values that have any line that is - longer than max_line_length will be - refolded - all -- all values are refolded. - - The default is 'long'. - - header_factory -- a callable that takes two arguments, 'name' and - 'value', where 'name' is a header field name and - 'value' is an unfolded header field value, and - returns a string-like object that represents that - header. A default header_factory is provided that - understands some of the RFC5322 header field types. - (Currently address fields and date fields have - special treatment, while all other fields are - treated as unstructured. This list will be - completed before the extension is marked stable.) - """ - - refold_source = 'long' - header_factory = HeaderRegistry() - - def __init__(self, **kw): - # Ensure that each new instance gets a unique header factory - # (as opposed to clones, which share the factory). - if 'header_factory' not in kw: - object.__setattr__(self, 'header_factory', HeaderRegistry()) - super().__init__(**kw) - - def header_max_count(self, name): - """+ - The implementation for this class returns the max_count attribute from - the specialized header class that would be used to construct a header - of type 'name'. - """ - return self.header_factory[name].max_count - - # The logic of the next three methods is chosen such that it is possible to - # switch a Message object between a Compat32 policy and a policy derived - # from this class and have the results stay consistent. This allows a - # Message object constructed with this policy to be passed to a library - # that only handles Compat32 objects, or to receive such an object and - # convert it to use the newer style by just changing its policy. It is - # also chosen because it postpones the relatively expensive full rfc5322 - # parse until as late as possible when parsing from source, since in many - # applications only a few headers will actually be inspected. - - def header_source_parse(self, sourcelines): - """+ - The name is parsed as everything up to the ':' and returned unmodified. - The value is determined by stripping leading whitespace off the - remainder of the first line, joining all subsequent lines together, and - stripping any trailing carriage return or linefeed characters. (This - is the same as Compat32). - - """ - name, value = sourcelines[0].split(':', 1) - value = value.lstrip(' \t') + ''.join(sourcelines[1:]) - return (name, value.rstrip('\r\n')) - - def header_store_parse(self, name, value): - """+ - The name is returned unchanged. If the input value has a 'name' - attribute and it matches the name ignoring case, the value is returned - unchanged. Otherwise the name and value are passed to header_factory - method, and the resulting custom header object is returned as the - value. In this case a ValueError is raised if the input value contains - CR or LF characters. 
-
-        """
-        if hasattr(value, 'name') and value.name.lower() == name.lower():
-            return (name, value)
-        if isinstance(value, str) and len(value.splitlines())>1:
-            raise ValueError("Header values may not contain linefeed "
-                             "or carriage return characters")
-        return (name, self.header_factory(name, value))
-
-    def header_fetch_parse(self, name, value):
-        """+
-        If the value has a 'name' attribute, it is returned unmodified.
-        Otherwise the name and the value with any linesep characters removed
-        are passed to the header_factory method, and the resulting custom
-        header object is returned.  Any surrogateescaped bytes get turned
-        into the unicode unknown-character glyph.
-
-        """
-        if hasattr(value, 'name'):
-            return value
-        return self.header_factory(name, ''.join(value.splitlines()))
-
-    def fold(self, name, value):
-        """+
-        Header folding is controlled by the refold_source policy setting.  A
-        value is considered to be a 'source value' if and only if it does not
-        have a 'name' attribute (having a 'name' attribute means it is a header
-        object of some sort).  If a source value needs to be refolded according
-        to the policy, it is converted into a custom header object by passing
-        the name and the value with any linesep characters removed to the
-        header_factory method.  Folding of a custom header object is done by
-        calling its fold method with the current policy.
-
-        Source values are split into lines using splitlines.  If the value is
-        not to be refolded, the lines are rejoined using the linesep from the
-        policy and returned.  The exception is lines containing non-ascii
-        binary data.  In that case the value is refolded regardless of the
-        refold_source setting, which causes the binary data to be CTE encoded
-        using the unknown-8bit charset.
-
-        """
-        return self._fold(name, value, refold_binary=True)
-
-    def fold_binary(self, name, value):
-        """+
-        The same as fold if cte_type is 7bit, except that the returned value is
-        bytes.
-
-        If cte_type is 8bit, non-ASCII binary data is converted back into
-        bytes.  Headers with binary data are not refolded, regardless of the
-        refold_source setting, since there is no way to know whether the binary
-        data consists of single byte characters or multibyte characters.
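The header_store_parse() check shown above is the policy's header-injection guard: under EmailPolicy a string value containing a line break is rejected at assignment time (the legacy compat32 policy performs no such check). With the stdlib policies:

    from email.message import EmailMessage

    msg = EmailMessage()          # EmailMessage defaults to email.policy.default
    msg['Subject'] = 'hello'
    try:
        msg['X-Injected'] = 'x\r\nBcc: attacker@example.com'
    except ValueError as exc:
        print(exc)  # Header values may not contain linefeed or carriage return characters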
- - """ - folded = self._fold(name, value, refold_binary=self.cte_type=='7bit') - return folded.encode('ascii', 'surrogateescape') - - def _fold(self, name, value, refold_binary=False): - if hasattr(value, 'name'): - return value.fold(policy=self) - maxlen = self.max_line_length if self.max_line_length else float('inf') - lines = value.splitlines() - refold = (self.refold_source == 'all' or - self.refold_source == 'long' and - (lines and len(lines[0])+len(name)+2 > maxlen or - any(len(x) > maxlen for x in lines[1:]))) - if refold or refold_binary and _has_surrogates(value): - return self.header_factory(name, ''.join(lines)).fold(policy=self) - return name + ': ' + self.linesep.join(lines) + self.linesep - - -default = EmailPolicy() -# Make the default policy use the class default header_factory -del default.header_factory -strict = default.clone(raise_on_defect=True) -SMTP = default.clone(linesep='\r\n') -HTTP = default.clone(linesep='\r\n', max_line_length=None) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/quoprimime.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/quoprimime.py deleted file mode 100755 index b69d158..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/quoprimime.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright (C) 2001-2006 Python Software Foundation -# Author: Ben Gertzfield -# Contact: email-sig@python.org - -"""Quoted-printable content transfer encoding per RFCs 2045-2047. - -This module handles the content transfer encoding method defined in RFC 2045 -to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to -safely encode text that is in a character set similar to the 7-bit US ASCII -character set, but that includes some 8-bit characters that are normally not -allowed in email bodies or headers. - -Quoted-printable is very space-inefficient for encoding binary files; use the -email.base64mime module for that instead. - -This module provides an interface to encode and decode both headers and bodies -with quoted-printable encoding. - -RFC 2045 defines a method for including character set information in an -`encoded-word' in a header. This method is commonly used for 8-bit real names -in To:/From:/Cc: etc. fields, as well as Subject: lines. - -This module does not do the line wrapping or end-of-line character -conversion necessary for proper internationalized headers; it only -does dumb encoding and decoding. To deal with the various line -wrapping issues, use the email.header module. -""" -from __future__ import unicode_literals -from __future__ import division -from __future__ import absolute_import -from future.builtins import bytes, chr, dict, int, range, super - -__all__ = [ - 'body_decode', - 'body_encode', - 'body_length', - 'decode', - 'decodestring', - 'header_decode', - 'header_encode', - 'header_length', - 'quote', - 'unquote', - ] - -import re -import io - -from string import ascii_letters, digits, hexdigits - -CRLF = '\r\n' -NL = '\n' -EMPTYSTRING = '' - -# Build a mapping of octets to the expansion of that octet. Since we're only -# going to have 256 of these things, this isn't terribly inefficient -# space-wise. Remember that headers and bodies have different sets of safe -# characters. Initialize both maps with the full expansion, and then override -# the safe bytes with the more compact form. 
-_QUOPRI_HEADER_MAP = dict((c, '=%02X' % c) for c in range(256)) -_QUOPRI_BODY_MAP = _QUOPRI_HEADER_MAP.copy() - -# Safe header bytes which need no encoding. -for c in bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')): - _QUOPRI_HEADER_MAP[c] = chr(c) -# Headers have one other special encoding; spaces become underscores. -_QUOPRI_HEADER_MAP[ord(' ')] = '_' - -# Safe body bytes which need no encoding. -for c in bytes(b' !"#$%&\'()*+,-./0123456789:;<>' - b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`' - b'abcdefghijklmnopqrstuvwxyz{|}~\t'): - _QUOPRI_BODY_MAP[c] = chr(c) - - - -# Helpers -def header_check(octet): - """Return True if the octet should be escaped with header quopri.""" - return chr(octet) != _QUOPRI_HEADER_MAP[octet] - - -def body_check(octet): - """Return True if the octet should be escaped with body quopri.""" - return chr(octet) != _QUOPRI_BODY_MAP[octet] - - -def header_length(bytearray): - """Return a header quoted-printable encoding length. - - Note that this does not include any RFC 2047 chrome added by - `header_encode()`. - - :param bytearray: An array of bytes (a.k.a. octets). - :return: The length in bytes of the byte array when it is encoded with - quoted-printable for headers. - """ - return sum(len(_QUOPRI_HEADER_MAP[octet]) for octet in bytearray) - - -def body_length(bytearray): - """Return a body quoted-printable encoding length. - - :param bytearray: An array of bytes (a.k.a. octets). - :return: The length in bytes of the byte array when it is encoded with - quoted-printable for bodies. - """ - return sum(len(_QUOPRI_BODY_MAP[octet]) for octet in bytearray) - - -def _max_append(L, s, maxlen, extra=''): - if not isinstance(s, str): - s = chr(s) - if not L: - L.append(s.lstrip()) - elif len(L[-1]) + len(s) <= maxlen: - L[-1] += extra + s - else: - L.append(s.lstrip()) - - -def unquote(s): - """Turn a string in the form =AB to the ASCII character with value 0xab""" - return chr(int(s[1:3], 16)) - - -def quote(c): - return '=%02X' % ord(c) - - - -def header_encode(header_bytes, charset='iso-8859-1'): - """Encode a single header line with quoted-printable (like) encoding. - - Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but - used specifically for email header fields to allow charsets with mostly 7 - bit characters (and some 8 bit) to remain more or less readable in non-RFC - 2045 aware mail clients. - - charset names the character set to use in the RFC 2046 header. It - defaults to iso-8859-1. - """ - # Return empty headers as an empty string. - if not header_bytes: - return '' - # Iterate over every byte, encoding if necessary. - encoded = [] - for octet in header_bytes: - encoded.append(_QUOPRI_HEADER_MAP[octet]) - # Now add the RFC chrome to each encoded chunk and glue the chunks - # together. 
- return '=?%s?q?%s?=' % (charset, EMPTYSTRING.join(encoded)) - - -class _body_accumulator(io.StringIO): - - def __init__(self, maxlinelen, eol, *args, **kw): - super().__init__(*args, **kw) - self.eol = eol - self.maxlinelen = self.room = maxlinelen - - def write_str(self, s): - """Add string s to the accumulated body.""" - self.write(s) - self.room -= len(s) - - def newline(self): - """Write eol, then start new line.""" - self.write_str(self.eol) - self.room = self.maxlinelen - - def write_soft_break(self): - """Write a soft break, then start a new line.""" - self.write_str('=') - self.newline() - - def write_wrapped(self, s, extra_room=0): - """Add a soft line break if needed, then write s.""" - if self.room < len(s) + extra_room: - self.write_soft_break() - self.write_str(s) - - def write_char(self, c, is_last_char): - if not is_last_char: - # Another character follows on this line, so we must leave - # extra room, either for it or a soft break, and whitespace - # need not be quoted. - self.write_wrapped(c, extra_room=1) - elif c not in ' \t': - # For this and remaining cases, no more characters follow, - # so there is no need to reserve extra room (since a hard - # break will immediately follow). - self.write_wrapped(c) - elif self.room >= 3: - # It's a whitespace character at end-of-line, and we have room - # for the three-character quoted encoding. - self.write(quote(c)) - elif self.room == 2: - # There's room for the whitespace character and a soft break. - self.write(c) - self.write_soft_break() - else: - # There's room only for a soft break. The quoted whitespace - # will be the only content on the subsequent line. - self.write_soft_break() - self.write(quote(c)) - - -def body_encode(body, maxlinelen=76, eol=NL): - """Encode with quoted-printable, wrapping at maxlinelen characters. - - Each line of encoded text will end with eol, which defaults to "\\n". Set - this to "\\r\\n" if you will be using the result of this function directly - in an email. - - Each line will be wrapped at, at most, maxlinelen characters before the - eol string (maxlinelen defaults to 76 characters, the maximum value - permitted by RFC 2045). Long lines will have the 'soft line break' - quoted-printable character "=" appended to them, so the decoded text will - be identical to the original text. - - The minimum maxlinelen is 4 to have room for a quoted character ("=XX") - followed by a soft line break. Smaller values will generate a - ValueError. - - """ - - if maxlinelen < 4: - raise ValueError("maxlinelen must be at least 4") - if not body: - return body - - # The last line may or may not end in eol, but all other lines do. - last_has_eol = (body[-1] in '\r\n') - - # This accumulator will make it easier to build the encoded body. - encoded_body = _body_accumulator(maxlinelen, eol) - - lines = body.splitlines() - last_line_no = len(lines) - 1 - for line_no, line in enumerate(lines): - last_char_index = len(line) - 1 - for i, c in enumerate(line): - if body_check(ord(c)): - c = quote(c) - encoded_body.write_char(c, i==last_char_index) - # Add an eol if input line had eol. All input lines have eol except - # possibly the last one. - if line_no < last_line_no or last_has_eol: - encoded_body.newline() - - return encoded_body.getvalue() - - - -# BAW: I'm not sure if the intent was for the signature of this function to be -# the same as base64MIME.decode() or not... -def decode(encoded, eol=NL): - """Decode a quoted-printable string. - - Lines are separated with eol, which defaults to \\n. 
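This encoder/decoder pair is internal to the email package, but the public stdlib quopri module exposes the same quoted-printable transformation and is handy for poking at the =XX escapes discussed above:

    import quopri

    encoded = quopri.encodestring('café au lait\n'.encode('utf-8'))
    print(encoded)                                       # b'caf=C3=A9 au lait\n'
    print(quopri.decodestring(encoded).decode('utf-8'))  # café au lait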
-    """
-    if not encoded:
-        return encoded
-    # BAW: see comment in encode() above.  Again, we're building up the
-    # decoded string with string concatenation, which could be done much more
-    # efficiently.
-    decoded = ''
-
-    for line in encoded.splitlines():
-        line = line.rstrip()
-        if not line:
-            decoded += eol
-            continue
-
-        i = 0
-        n = len(line)
-        while i < n:
-            c = line[i]
-            if c != '=':
-                decoded += c
-                i += 1
-            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
-            # a soft line break.
-            elif i+1 == n:
-                i += 1
-                continue
-            # Decode if in form =AB
-            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
-                decoded += unquote(line[i:i+3])
-                i += 3
-            # Otherwise, not in form =AB, pass literally
-            else:
-                decoded += c
-                i += 1
-
-            if i == n:
-                decoded += eol
-    # Special case if original string did not end with eol
-    if encoded[-1] not in '\r\n' and decoded.endswith(eol):
-        decoded = decoded[:-1]
-    return decoded
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_decode = decode
-decodestring = decode
-
-
-
-def _unquote_match(match):
-    """Turn a match in the form =AB to the ASCII character with value 0xab"""
-    s = match.group(0)
-    return unquote(s)
-
-
-# Header decoding is done a bit differently
-def header_decode(s):
-    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.
-
-    This function does not parse a full MIME header value encoded with
-    quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use
-    the high level email.header class for that functionality.
-    """
-    s = s.replace('_', ' ')
-    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, flags=re.ASCII)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/utils.py
deleted file mode 100755
index 4abebf7..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/email/utils.py
+++ /dev/null
@@ -1,400 +0,0 @@
-# Copyright (C) 2001-2010 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Miscellaneous utilities."""
-
-from __future__ import unicode_literals
-from __future__ import division
-from __future__ import absolute_import
-from future import utils
-from future.builtins import bytes, int, str
-
-__all__ = [
-    'collapse_rfc2231_value',
-    'decode_params',
-    'decode_rfc2231',
-    'encode_rfc2231',
-    'formataddr',
-    'formatdate',
-    'format_datetime',
-    'getaddresses',
-    'make_msgid',
-    'mktime_tz',
-    'parseaddr',
-    'parsedate',
-    'parsedate_tz',
-    'parsedate_to_datetime',
-    'unquote',
-    ]
-
-import os
-import re
-if utils.PY2:
-    re.ASCII = 0
-import time
-import base64
-import random
-import socket
-from future.backports import datetime
-from future.backports.urllib.parse import quote as url_quote, unquote as url_unquote
-import warnings
-from io import StringIO
-
-from future.backports.email._parseaddr import quote
-from future.backports.email._parseaddr import AddressList as _AddressList
-from future.backports.email._parseaddr import mktime_tz
-
-from future.backports.email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
-
-from quopri import decodestring as _qdecode
-
-# Intrapackage imports
-from future.backports.email.encoders import _bencode, _qencode
-from future.backports.email.charset import Charset
-
-COMMASPACE = ', '
-EMPTYSTRING = ''
-UEMPTYSTRING = ''
-CRLF = '\r\n'
-TICK = "'"
-
-specialsre = re.compile(r'[][\\()<>@,:;".]')
-escapesre = re.compile(r'[\\"]')
-
-# How to figure out if we are processing strings that come from a byte
-# source with undecodable characters.
-_has_surrogates = re.compile(
-    '([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search
-
-# How to deal with a string containing bytes before handing it to the
-# application through the 'normal' interface.
-def _sanitize(string):
-    # Turn any escaped bytes into unicode 'unknown' char.
-    original_bytes = string.encode('ascii', 'surrogateescape')
-    return original_bytes.decode('ascii', 'replace')
-
-
-# Helpers
-
-def formataddr(pair, charset='utf-8'):
-    """The inverse of parseaddr(), this takes a 2-tuple of the form
-    (realname, email_address) and returns the string value suitable
-    for an RFC 2822 From, To or Cc header.
-
-    If the first element of pair is false, then the second element is
-    returned unmodified.
-
-    Optional charset if given is the character set that is used to encode
-    realname in case realname is not ASCII safe.  Can be an instance of str or
-    a Charset-like object which has a header_encode method.  Default is
-    'utf-8'.
-    """
-    name, address = pair
-    # The address MUST (per RFC) be ascii, so raise a UnicodeError if it isn't.
-    address.encode('ascii')
-    if name:
-        try:
-            name.encode('ascii')
-        except UnicodeEncodeError:
-            if isinstance(charset, str):
-                charset = Charset(charset)
-            encoded_name = charset.header_encode(name)
-            return "%s <%s>" % (encoded_name, address)
-        else:
-            quotes = ''
-            if specialsre.search(name):
-                quotes = '"'
-            name = escapesre.sub(r'\\\g<0>', name)
-            return '%s%s%s <%s>' % (quotes, name, quotes, address)
-    return address
-
-
-
-def getaddresses(fieldvalues):
-    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
-    all = COMMASPACE.join(fieldvalues)
-    a = _AddressList(all)
-    return a.addresslist
-
-
-
-ecre = re.compile(r'''
-  =\?                   # literal =?
-  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
-  \?                    # literal ?
-  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
-  \?                    # literal ?
-  (?P<atom>.*?)         # non-greedy up to the next ?= is the atom
-  \?=                   # literal ?=
-  ''', re.VERBOSE | re.IGNORECASE)
-
-
-def _format_timetuple_and_zone(timetuple, zone):
-    return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
-        ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
-        timetuple[2],
-        ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
-         'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
-        timetuple[0], timetuple[3], timetuple[4], timetuple[5],
-        zone)
-
-def formatdate(timeval=None, localtime=False, usegmt=False):
-    """Returns a date string as specified by RFC 2822, e.g.:
-
-    Fri, 09 Nov 2001 01:08:47 -0000
-
-    Optional timeval if given is a floating point time value as accepted by
-    gmtime() and localtime(), otherwise the current time is used.
-
-    Optional localtime is a flag that when True, interprets timeval, and
-    returns a date relative to the local timezone instead of UTC, properly
-    taking daylight savings time into account.
-
-    Optional argument usegmt means that the timezone is written out as
-    an ascii string, not numeric one (so "GMT" instead of "+0000").  This
-    is needed for HTTP, and is only used when localtime==False.
-    """
-    # Note: we cannot use strftime() because that honors the locale and RFC
-    # 2822 requires that day and month names be the English abbreviations.
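formataddr() above is the safe way to build address headers: it quotes specials in the display name and falls back to an RFC 2047 encoded-word for non-ASCII names. A sketch with the stdlib function (the exact encoded output shown is illustrative; utf-8 names are base64-encoded by default):

    from email.utils import formataddr, parseaddr

    print(formataddr(('Watson, J.', 'jw@example.com')))
    # "Watson, J." <jw@example.com>        (the comma forces quoting)

    print(formataddr(('Jürgen', 'j@example.com')))
    # =?utf-8?b?SsO8cmdlbg==?= <j@example.com>

    print(parseaddr('"Watson, J." <jw@example.com>'))
    # ('Watson, J.', 'jw@example.com')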
- if timeval is None: - timeval = time.time() - if localtime: - now = time.localtime(timeval) - # Calculate timezone offset, based on whether the local zone has - # daylight savings time, and whether DST is in effect. - if time.daylight and now[-1]: - offset = time.altzone - else: - offset = time.timezone - hours, minutes = divmod(abs(offset), 3600) - # Remember offset is in seconds west of UTC, but the timezone is in - # minutes east of UTC, so the signs differ. - if offset > 0: - sign = '-' - else: - sign = '+' - zone = '%s%02d%02d' % (sign, hours, minutes // 60) - else: - now = time.gmtime(timeval) - # Timezone offset is always -0000 - if usegmt: - zone = 'GMT' - else: - zone = '-0000' - return _format_timetuple_and_zone(now, zone) - -def format_datetime(dt, usegmt=False): - """Turn a datetime into a date string as specified in RFC 2822. - - If usegmt is True, dt must be an aware datetime with an offset of zero. In - this case 'GMT' will be rendered instead of the normal +0000 required by - RFC2822. This is to support HTTP headers involving date stamps. - """ - now = dt.timetuple() - if usegmt: - if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc: - raise ValueError("usegmt option requires a UTC datetime") - zone = 'GMT' - elif dt.tzinfo is None: - zone = '-0000' - else: - zone = dt.strftime("%z") - return _format_timetuple_and_zone(now, zone) - - -def make_msgid(idstring=None, domain=None): - """Returns a string suitable for RFC 2822 compliant Message-ID, e.g: - - <20020201195627.33539.96671@nightshade.la.mastaler.com> - - Optional idstring if given is a string used to strengthen the - uniqueness of the message id. Optional domain if given provides the - portion of the message id after the '@'. It defaults to the locally - defined hostname. - """ - timeval = time.time() - utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval)) - pid = os.getpid() - randint = random.randrange(100000) - if idstring is None: - idstring = '' - else: - idstring = '.' + idstring - if domain is None: - domain = socket.getfqdn() - msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain) - return msgid - - -def parsedate_to_datetime(data): - _3to2list = list(_parsedate_tz(data)) - dtuple, tz, = [_3to2list[:-1]] + _3to2list[-1:] - if tz is None: - return datetime.datetime(*dtuple[:6]) - return datetime.datetime(*dtuple[:6], - tzinfo=datetime.timezone(datetime.timedelta(seconds=tz))) - - -def parseaddr(addr): - addrs = _AddressList(addr).addresslist - if not addrs: - return '', '' - return addrs[0] - - -# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3. -def unquote(str): - """Remove quotes from a string.""" - if len(str) > 1: - if str.startswith('"') and str.endswith('"'): - return str[1:-1].replace('\\\\', '\\').replace('\\"', '"') - if str.startswith('<') and str.endswith('>'): - return str[1:-1] - return str - - - -# RFC2231-related functions - parameter encoding and decoding -def decode_rfc2231(s): - """Decode string according to RFC 2231""" - parts = s.split(TICK, 2) - if len(parts) <= 2: - return None, None, s - return parts - - -def encode_rfc2231(s, charset=None, language=None): - """Encode string according to RFC 2231. - - If neither charset nor language is given, then s is returned as-is. If - charset is given but not language, the string is encoded using the empty - string for language. 
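A quick round trip through the date helpers above, via their stdlib email.utils counterparts (same lineage); the timestamp is the one from the formatdate() docstring:

from email.utils import formatdate, parsedate_to_datetime

stamp = formatdate(1005268127, usegmt=True)
print(stamp)                         # Fri, 09 Nov 2001 01:08:47 GMT
print(parsedate_to_datetime(stamp))  # 2001-11-09 01:08:47+00:00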
- """ - s = url_quote(s, safe='', encoding=charset or 'ascii') - if charset is None and language is None: - return s - if language is None: - language = '' - return "%s'%s'%s" % (charset, language, s) - - -rfc2231_continuation = re.compile(r'^(?P\w+)\*((?P[0-9]+)\*?)?$', - re.ASCII) - -def decode_params(params): - """Decode parameters list according to RFC 2231. - - params is a sequence of 2-tuples containing (param name, string value). - """ - # Copy params so we don't mess with the original - params = params[:] - new_params = [] - # Map parameter's name to a list of continuations. The values are a - # 3-tuple of the continuation number, the string value, and a flag - # specifying whether a particular segment is %-encoded. - rfc2231_params = {} - name, value = params.pop(0) - new_params.append((name, value)) - while params: - name, value = params.pop(0) - if name.endswith('*'): - encoded = True - else: - encoded = False - value = unquote(value) - mo = rfc2231_continuation.match(name) - if mo: - name, num = mo.group('name', 'num') - if num is not None: - num = int(num) - rfc2231_params.setdefault(name, []).append((num, value, encoded)) - else: - new_params.append((name, '"%s"' % quote(value))) - if rfc2231_params: - for name, continuations in rfc2231_params.items(): - value = [] - extended = False - # Sort by number - continuations.sort() - # And now append all values in numerical order, converting - # %-encodings for the encoded segments. If any of the - # continuation names ends in a *, then the entire string, after - # decoding segments and concatenating, must have the charset and - # language specifiers at the beginning of the string. - for num, s, encoded in continuations: - if encoded: - # Decode as "latin-1", so the characters in s directly - # represent the percent-encoded octet values. - # collapse_rfc2231_value treats this as an octet sequence. - s = url_unquote(s, encoding="latin-1") - extended = True - value.append(s) - value = quote(EMPTYSTRING.join(value)) - if extended: - charset, language, value = decode_rfc2231(value) - new_params.append((name, (charset, language, '"%s"' % value))) - else: - new_params.append((name, '"%s"' % value)) - return new_params - -def collapse_rfc2231_value(value, errors='replace', - fallback_charset='us-ascii'): - if not isinstance(value, tuple) or len(value) != 3: - return unquote(value) - # While value comes to us as a unicode string, we need it to be a bytes - # object. We do not want bytes() normal utf-8 decoder, we want a straight - # interpretation of the string as character bytes. - charset, language, text = value - rawbytes = bytes(text, 'raw-unicode-escape') - try: - return str(rawbytes, charset, errors) - except LookupError: - # charset is not a known codec. - return unquote(text) - - -# -# datetime doesn't provide a localtime function yet, so provide one. Code -# adapted from the patch in issue 9527. This may not be perfect, but it is -# better than not having it. -# - -def localtime(dt=None, isdst=-1): - """Return local time as an aware datetime object. - - If called without arguments, return current time. Otherwise *dt* - argument should be a datetime instance, and it is converted to the - local time zone according to the system time zone database. If *dt* is - naive (that is, dt.tzinfo is None), it is assumed to be in local time. 
- In this case, a positive or zero value for *isdst* causes localtime to - presume initially that summer time (for example, Daylight Saving Time) - is or is not (respectively) in effect for the specified time. A - negative value for *isdst* causes the localtime() function to attempt - to divine whether summer time is in effect for the specified time. - - """ - if dt is None: - return datetime.datetime.now(datetime.timezone.utc).astimezone() - if dt.tzinfo is not None: - return dt.astimezone() - # We have a naive datetime. Convert to a (localtime) timetuple and pass to - # system mktime together with the isdst hint. System mktime will return - # seconds since epoch. - tm = dt.timetuple()[:-1] + (isdst,) - seconds = time.mktime(tm) - localtm = time.localtime(seconds) - try: - delta = datetime.timedelta(seconds=localtm.tm_gmtoff) - tz = datetime.timezone(delta, localtm.tm_zone) - except AttributeError: - # Compute UTC offset and compare with the value implied by tm_isdst. - # If the values match, use the zone name implied by tm_isdst. - delta = dt - datetime.datetime(*time.gmtime(seconds)[:6]) - dst = time.daylight and localtm.tm_isdst > 0 - gmtoff = -(time.altzone if dst else time.timezone) - if delta == datetime.timedelta(seconds=gmtoff): - tz = datetime.timezone(delta, time.tzname[dst]) - else: - tz = datetime.timezone(delta) - return dt.replace(tzinfo=tz) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/__init__.py deleted file mode 100755 index 58e133f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -General functions for HTML manipulation, backported from Py3. - -Note that this uses Python 2.7 code with the corresponding Python 3 -module names and locations. -""" - -from __future__ import unicode_literals - - -_escape_map = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;'} -_escape_map_full = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;', - ord('"'): '&quot;', ord('\''): '&#x27;'} - -# NB: this is a candidate for a bytes/string polymorphic interface - -def escape(s, quote=True): - """ - Replace special characters "&", "<" and ">" to HTML-safe sequences. - If the optional flag quote is true (the default), the quotation mark - characters, both double quote (") and single quote (') characters are also - translated. - """ - assert not isinstance(s, bytes), 'Pass a unicode string' - if quote: - return s.translate(_escape_map_full) - return s.translate(_escape_map) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/entities.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/entities.py deleted file mode 100755 index 5c73f69..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/entities.py +++ /dev/null @@ -1,2514 +0,0 @@ -"""HTML character entity references.
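The translate()-based escape() above has the same contract as the stdlib html.escape; a minimal sketch of both map variants:

from html import escape

print(escape('<b>"M&M\'s"</b>'))
# &lt;b&gt;&quot;M&amp;M&#x27;s&quot;&lt;/b&gt;
print(escape("<b> & </b>", quote=False))
# &lt;b&gt; &amp; &lt;/b&gt;   (quote characters left untouched)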
- -Backported for python-future from Python 3.3 -""" - -from __future__ import (absolute_import, division, - print_function, unicode_literals) -from future.builtins import * - - -# maps the HTML entity name to the Unicode codepoint -name2codepoint = { - 'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1 - 'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1 - 'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1 - 'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1 - 'Alpha': 0x0391, # greek capital letter alpha, U+0391 - 'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1 - 'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1 - 'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1 - 'Beta': 0x0392, # greek capital letter beta, U+0392 - 'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1 - 'Chi': 0x03a7, # greek capital letter chi, U+03A7 - 'Dagger': 0x2021, # double dagger, U+2021 ISOpub - 'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3 - 'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1 - 'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1 - 'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1 - 'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1 - 'Epsilon': 0x0395, # greek capital letter epsilon, U+0395 - 'Eta': 0x0397, # greek capital letter eta, U+0397 - 'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1 - 'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3 - 'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1 - 'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1 - 'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1 - 'Iota': 0x0399, # greek capital letter iota, U+0399 - 'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1 - 'Kappa': 0x039a, # greek capital letter kappa, U+039A - 'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3 - 'Mu': 0x039c, # greek capital letter mu, U+039C - 'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1 - 'Nu': 0x039d, # greek capital letter nu, U+039D - 'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2 - 'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1 - 'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1 - 'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1 - 'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3 - 'Omicron': 0x039f, # greek capital letter omicron, U+039F - 'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1 - 'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1 - 'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1 - 'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3 - 'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3 - 'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech - 'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3 - 'Rho': 0x03a1, # greek capital letter rho, U+03A1 - 'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2 - 'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3 - 'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1 - 'Tau': 0x03a4, # greek 
capital letter tau, U+03A4 - 'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3 - 'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1 - 'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1 - 'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1 - 'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3 - 'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1 - 'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3 - 'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1 - 'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2 - 'Zeta': 0x0396, # greek capital letter zeta, U+0396 - 'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1 - 'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1 - 'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia - 'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1 - 'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1 - 'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW - 'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3 - 'amp': 0x0026, # ampersand, U+0026 ISOnum - 'and': 0x2227, # logical and = wedge, U+2227 ISOtech - 'ang': 0x2220, # angle, U+2220 ISOamso - 'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1 - 'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr - 'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1 - 'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1 - 'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW - 'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3 - 'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum - 'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub - 'cap': 0x2229, # intersection = cap, U+2229 ISOtech - 'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1 - 'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia - 'cent': 0x00a2, # cent sign, U+00A2 ISOnum - 'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3 - 'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub - 'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub - 'cong': 0x2245, # approximately equal to, U+2245 ISOtech - 'copy': 0x00a9, # copyright sign, U+00A9 ISOnum - 'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW - 'cup': 0x222a, # union = cup, U+222A ISOtech - 'curren': 0x00a4, # currency sign, U+00A4 ISOnum - 'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa - 'dagger': 0x2020, # dagger, U+2020 ISOpub - 'darr': 0x2193, # downwards arrow, U+2193 ISOnum - 'deg': 0x00b0, # degree sign, U+00B0 ISOnum - 'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3 - 'diams': 0x2666, # black diamond suit, U+2666 ISOpub - 'divide': 0x00f7, # division sign, U+00F7 ISOnum - 'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1 - 'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1 - 'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1 - 'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso - 'emsp': 0x2003, # em space, U+2003 ISOpub - 'ensp': 0x2002, # en space, U+2002 ISOpub - 'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3 - 'equiv': 0x2261, # identical to, U+2261 ISOtech - 'eta': 0x03b7, # greek small 
letter eta, U+03B7 ISOgrk3 - 'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1 - 'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1 - 'euro': 0x20ac, # euro sign, U+20AC NEW - 'exist': 0x2203, # there exists, U+2203 ISOtech - 'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech - 'forall': 0x2200, # for all, U+2200 ISOtech - 'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum - 'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum - 'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum - 'frasl': 0x2044, # fraction slash, U+2044 NEW - 'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3 - 'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech - 'gt': 0x003e, # greater-than sign, U+003E ISOnum - 'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa - 'harr': 0x2194, # left right arrow, U+2194 ISOamsa - 'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub - 'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub - 'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1 - 'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1 - 'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum - 'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1 - 'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso - 'infin': 0x221e, # infinity, U+221E ISOtech - 'int': 0x222b, # integral, U+222B ISOtech - 'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3 - 'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum - 'isin': 0x2208, # element of, U+2208 ISOtech - 'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1 - 'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3 - 'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech - 'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3 - 'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech - 'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum - 'larr': 0x2190, # leftwards arrow, U+2190 ISOnum - 'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc - 'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum - 'le': 0x2264, # less-than or equal to, U+2264 ISOtech - 'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc - 'lowast': 0x2217, # asterisk operator, U+2217 ISOtech - 'loz': 0x25ca, # lozenge, U+25CA ISOpub - 'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070 - 'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed - 'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum - 'lt': 0x003c, # less-than sign, U+003C ISOnum - 'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia - 'mdash': 0x2014, # em dash, U+2014 ISOpub - 'micro': 0x00b5, # micro sign, U+00B5 ISOnum - 'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum - 'minus': 0x2212, # minus sign, U+2212 ISOtech - 'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3 - 'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech - 'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum - 'ndash': 0x2013, # en dash, U+2013 ISOpub - 'ne': 0x2260, # not equal to, U+2260 ISOtech - 'ni': 0x220b, # contains as member, U+220B ISOtech - 'not': 0x00ac, # not sign, U+00AC ISOnum - 'notin': 0x2209, # not an element of, 
U+2209 ISOtech - 'nsub': 0x2284, # not a subset of, U+2284 ISOamsn - 'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1 - 'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3 - 'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1 - 'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1 - 'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2 - 'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1 - 'oline': 0x203e, # overline = spacing overscore, U+203E NEW - 'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3 - 'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW - 'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb - 'or': 0x2228, # logical or = vee, U+2228 ISOtech - 'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum - 'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum - 'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1 - 'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1 - 'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb - 'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1 - 'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum - 'part': 0x2202, # partial differential, U+2202 ISOtech - 'permil': 0x2030, # per mille sign, U+2030 ISOtech - 'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech - 'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3 - 'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3 - 'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3 - 'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum - 'pound': 0x00a3, # pound sign, U+00A3 ISOnum - 'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech - 'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb - 'prop': 0x221d, # proportional to, U+221D ISOtech - 'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3 - 'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum - 'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech - 'radic': 0x221a, # square root = radical sign, U+221A ISOtech - 'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech - 'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum - 'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum - 'rceil': 0x2309, # right ceiling, U+2309 ISOamsc - 'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum - 'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso - 'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum - 'rfloor': 0x230b, # right floor, U+230B ISOamsc - 'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3 - 'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070 - 'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed - 'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum - 'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW - 'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2 - 'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb - 'sect': 0x00a7, # section sign, U+00A7 ISOnum - 'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum - 'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3 - 'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3 - 'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech - 'spades': 0x2660, # black spade suit, U+2660 ISOpub - 'sub': 
0x2282, # subset of, U+2282 ISOtech - 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech - 'sum': 0x2211, # n-ary sumation, U+2211 ISOamsb - 'sup': 0x2283, # superset of, U+2283 ISOtech - 'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum - 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum - 'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum - 'supe': 0x2287, # superset of or equal to, U+2287 ISOtech - 'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1 - 'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3 - 'there4': 0x2234, # therefore, U+2234 ISOtech - 'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3 - 'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW - 'thinsp': 0x2009, # thin space, U+2009 ISOpub - 'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1 - 'tilde': 0x02dc, # small tilde, U+02DC ISOdia - 'times': 0x00d7, # multiplication sign, U+00D7 ISOnum - 'trade': 0x2122, # trade mark sign, U+2122 ISOnum - 'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa - 'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1 - 'uarr': 0x2191, # upwards arrow, U+2191 ISOnum - 'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1 - 'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1 - 'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia - 'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW - 'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3 - 'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1 - 'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso - 'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3 - 'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1 - 'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum - 'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1 - 'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3 - 'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070 - 'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070 -} - - -# maps the HTML5 named character references to the equivalent Unicode character(s) -html5 = { - 'Aacute': '\xc1', - 'aacute': '\xe1', - 'Aacute;': '\xc1', - 'aacute;': '\xe1', - 'Abreve;': '\u0102', - 'abreve;': '\u0103', - 'ac;': '\u223e', - 'acd;': '\u223f', - 'acE;': '\u223e\u0333', - 'Acirc': '\xc2', - 'acirc': '\xe2', - 'Acirc;': '\xc2', - 'acirc;': '\xe2', - 'acute': '\xb4', - 'acute;': '\xb4', - 'Acy;': '\u0410', - 'acy;': '\u0430', - 'AElig': '\xc6', - 'aelig': '\xe6', - 'AElig;': '\xc6', - 'aelig;': '\xe6', - 'af;': '\u2061', - 'Afr;': '\U0001d504', - 'afr;': '\U0001d51e', - 'Agrave': '\xc0', - 'agrave': '\xe0', - 'Agrave;': '\xc0', - 'agrave;': '\xe0', - 'alefsym;': '\u2135', - 'aleph;': '\u2135', - 'Alpha;': '\u0391', - 'alpha;': '\u03b1', - 'Amacr;': '\u0100', - 'amacr;': '\u0101', - 'amalg;': '\u2a3f', - 'AMP': '&', - 'amp': '&', - 'AMP;': '&', - 'amp;': '&', - 'And;': '\u2a53', - 'and;': '\u2227', - 'andand;': '\u2a55', - 'andd;': '\u2a5c', - 'andslope;': '\u2a58', - 'andv;': '\u2a5a', - 'ang;': '\u2220', - 'ange;': '\u29a4', - 'angle;': '\u2220', - 'angmsd;': '\u2221', - 'angmsdaa;': '\u29a8', - 'angmsdab;': '\u29a9', - 'angmsdac;': '\u29aa', - 'angmsdad;': '\u29ab', - 'angmsdae;': '\u29ac', - 'angmsdaf;': '\u29ad', - 'angmsdag;': '\u29ae', - 'angmsdah;': '\u29af', - 'angrt;': '\u221f', - 'angrtvb;': '\u22be', - 'angrtvbd;': 
'\u299d', - 'angsph;': '\u2222', - 'angst;': '\xc5', - 'angzarr;': '\u237c', - 'Aogon;': '\u0104', - 'aogon;': '\u0105', - 'Aopf;': '\U0001d538', - 'aopf;': '\U0001d552', - 'ap;': '\u2248', - 'apacir;': '\u2a6f', - 'apE;': '\u2a70', - 'ape;': '\u224a', - 'apid;': '\u224b', - 'apos;': "'", - 'ApplyFunction;': '\u2061', - 'approx;': '\u2248', - 'approxeq;': '\u224a', - 'Aring': '\xc5', - 'aring': '\xe5', - 'Aring;': '\xc5', - 'aring;': '\xe5', - 'Ascr;': '\U0001d49c', - 'ascr;': '\U0001d4b6', - 'Assign;': '\u2254', - 'ast;': '*', - 'asymp;': '\u2248', - 'asympeq;': '\u224d', - 'Atilde': '\xc3', - 'atilde': '\xe3', - 'Atilde;': '\xc3', - 'atilde;': '\xe3', - 'Auml': '\xc4', - 'auml': '\xe4', - 'Auml;': '\xc4', - 'auml;': '\xe4', - 'awconint;': '\u2233', - 'awint;': '\u2a11', - 'backcong;': '\u224c', - 'backepsilon;': '\u03f6', - 'backprime;': '\u2035', - 'backsim;': '\u223d', - 'backsimeq;': '\u22cd', - 'Backslash;': '\u2216', - 'Barv;': '\u2ae7', - 'barvee;': '\u22bd', - 'Barwed;': '\u2306', - 'barwed;': '\u2305', - 'barwedge;': '\u2305', - 'bbrk;': '\u23b5', - 'bbrktbrk;': '\u23b6', - 'bcong;': '\u224c', - 'Bcy;': '\u0411', - 'bcy;': '\u0431', - 'bdquo;': '\u201e', - 'becaus;': '\u2235', - 'Because;': '\u2235', - 'because;': '\u2235', - 'bemptyv;': '\u29b0', - 'bepsi;': '\u03f6', - 'bernou;': '\u212c', - 'Bernoullis;': '\u212c', - 'Beta;': '\u0392', - 'beta;': '\u03b2', - 'beth;': '\u2136', - 'between;': '\u226c', - 'Bfr;': '\U0001d505', - 'bfr;': '\U0001d51f', - 'bigcap;': '\u22c2', - 'bigcirc;': '\u25ef', - 'bigcup;': '\u22c3', - 'bigodot;': '\u2a00', - 'bigoplus;': '\u2a01', - 'bigotimes;': '\u2a02', - 'bigsqcup;': '\u2a06', - 'bigstar;': '\u2605', - 'bigtriangledown;': '\u25bd', - 'bigtriangleup;': '\u25b3', - 'biguplus;': '\u2a04', - 'bigvee;': '\u22c1', - 'bigwedge;': '\u22c0', - 'bkarow;': '\u290d', - 'blacklozenge;': '\u29eb', - 'blacksquare;': '\u25aa', - 'blacktriangle;': '\u25b4', - 'blacktriangledown;': '\u25be', - 'blacktriangleleft;': '\u25c2', - 'blacktriangleright;': '\u25b8', - 'blank;': '\u2423', - 'blk12;': '\u2592', - 'blk14;': '\u2591', - 'blk34;': '\u2593', - 'block;': '\u2588', - 'bne;': '=\u20e5', - 'bnequiv;': '\u2261\u20e5', - 'bNot;': '\u2aed', - 'bnot;': '\u2310', - 'Bopf;': '\U0001d539', - 'bopf;': '\U0001d553', - 'bot;': '\u22a5', - 'bottom;': '\u22a5', - 'bowtie;': '\u22c8', - 'boxbox;': '\u29c9', - 'boxDL;': '\u2557', - 'boxDl;': '\u2556', - 'boxdL;': '\u2555', - 'boxdl;': '\u2510', - 'boxDR;': '\u2554', - 'boxDr;': '\u2553', - 'boxdR;': '\u2552', - 'boxdr;': '\u250c', - 'boxH;': '\u2550', - 'boxh;': '\u2500', - 'boxHD;': '\u2566', - 'boxHd;': '\u2564', - 'boxhD;': '\u2565', - 'boxhd;': '\u252c', - 'boxHU;': '\u2569', - 'boxHu;': '\u2567', - 'boxhU;': '\u2568', - 'boxhu;': '\u2534', - 'boxminus;': '\u229f', - 'boxplus;': '\u229e', - 'boxtimes;': '\u22a0', - 'boxUL;': '\u255d', - 'boxUl;': '\u255c', - 'boxuL;': '\u255b', - 'boxul;': '\u2518', - 'boxUR;': '\u255a', - 'boxUr;': '\u2559', - 'boxuR;': '\u2558', - 'boxur;': '\u2514', - 'boxV;': '\u2551', - 'boxv;': '\u2502', - 'boxVH;': '\u256c', - 'boxVh;': '\u256b', - 'boxvH;': '\u256a', - 'boxvh;': '\u253c', - 'boxVL;': '\u2563', - 'boxVl;': '\u2562', - 'boxvL;': '\u2561', - 'boxvl;': '\u2524', - 'boxVR;': '\u2560', - 'boxVr;': '\u255f', - 'boxvR;': '\u255e', - 'boxvr;': '\u251c', - 'bprime;': '\u2035', - 'Breve;': '\u02d8', - 'breve;': '\u02d8', - 'brvbar': '\xa6', - 'brvbar;': '\xa6', - 'Bscr;': '\u212c', - 'bscr;': '\U0001d4b7', - 'bsemi;': '\u204f', - 'bsim;': '\u223d', - 'bsime;': '\u22cd', - 'bsol;': '\\', 
- 'bsolb;': '\u29c5', - 'bsolhsub;': '\u27c8', - 'bull;': '\u2022', - 'bullet;': '\u2022', - 'bump;': '\u224e', - 'bumpE;': '\u2aae', - 'bumpe;': '\u224f', - 'Bumpeq;': '\u224e', - 'bumpeq;': '\u224f', - 'Cacute;': '\u0106', - 'cacute;': '\u0107', - 'Cap;': '\u22d2', - 'cap;': '\u2229', - 'capand;': '\u2a44', - 'capbrcup;': '\u2a49', - 'capcap;': '\u2a4b', - 'capcup;': '\u2a47', - 'capdot;': '\u2a40', - 'CapitalDifferentialD;': '\u2145', - 'caps;': '\u2229\ufe00', - 'caret;': '\u2041', - 'caron;': '\u02c7', - 'Cayleys;': '\u212d', - 'ccaps;': '\u2a4d', - 'Ccaron;': '\u010c', - 'ccaron;': '\u010d', - 'Ccedil': '\xc7', - 'ccedil': '\xe7', - 'Ccedil;': '\xc7', - 'ccedil;': '\xe7', - 'Ccirc;': '\u0108', - 'ccirc;': '\u0109', - 'Cconint;': '\u2230', - 'ccups;': '\u2a4c', - 'ccupssm;': '\u2a50', - 'Cdot;': '\u010a', - 'cdot;': '\u010b', - 'cedil': '\xb8', - 'cedil;': '\xb8', - 'Cedilla;': '\xb8', - 'cemptyv;': '\u29b2', - 'cent': '\xa2', - 'cent;': '\xa2', - 'CenterDot;': '\xb7', - 'centerdot;': '\xb7', - 'Cfr;': '\u212d', - 'cfr;': '\U0001d520', - 'CHcy;': '\u0427', - 'chcy;': '\u0447', - 'check;': '\u2713', - 'checkmark;': '\u2713', - 'Chi;': '\u03a7', - 'chi;': '\u03c7', - 'cir;': '\u25cb', - 'circ;': '\u02c6', - 'circeq;': '\u2257', - 'circlearrowleft;': '\u21ba', - 'circlearrowright;': '\u21bb', - 'circledast;': '\u229b', - 'circledcirc;': '\u229a', - 'circleddash;': '\u229d', - 'CircleDot;': '\u2299', - 'circledR;': '\xae', - 'circledS;': '\u24c8', - 'CircleMinus;': '\u2296', - 'CirclePlus;': '\u2295', - 'CircleTimes;': '\u2297', - 'cirE;': '\u29c3', - 'cire;': '\u2257', - 'cirfnint;': '\u2a10', - 'cirmid;': '\u2aef', - 'cirscir;': '\u29c2', - 'ClockwiseContourIntegral;': '\u2232', - 'CloseCurlyDoubleQuote;': '\u201d', - 'CloseCurlyQuote;': '\u2019', - 'clubs;': '\u2663', - 'clubsuit;': '\u2663', - 'Colon;': '\u2237', - 'colon;': ':', - 'Colone;': '\u2a74', - 'colone;': '\u2254', - 'coloneq;': '\u2254', - 'comma;': ',', - 'commat;': '@', - 'comp;': '\u2201', - 'compfn;': '\u2218', - 'complement;': '\u2201', - 'complexes;': '\u2102', - 'cong;': '\u2245', - 'congdot;': '\u2a6d', - 'Congruent;': '\u2261', - 'Conint;': '\u222f', - 'conint;': '\u222e', - 'ContourIntegral;': '\u222e', - 'Copf;': '\u2102', - 'copf;': '\U0001d554', - 'coprod;': '\u2210', - 'Coproduct;': '\u2210', - 'COPY': '\xa9', - 'copy': '\xa9', - 'COPY;': '\xa9', - 'copy;': '\xa9', - 'copysr;': '\u2117', - 'CounterClockwiseContourIntegral;': '\u2233', - 'crarr;': '\u21b5', - 'Cross;': '\u2a2f', - 'cross;': '\u2717', - 'Cscr;': '\U0001d49e', - 'cscr;': '\U0001d4b8', - 'csub;': '\u2acf', - 'csube;': '\u2ad1', - 'csup;': '\u2ad0', - 'csupe;': '\u2ad2', - 'ctdot;': '\u22ef', - 'cudarrl;': '\u2938', - 'cudarrr;': '\u2935', - 'cuepr;': '\u22de', - 'cuesc;': '\u22df', - 'cularr;': '\u21b6', - 'cularrp;': '\u293d', - 'Cup;': '\u22d3', - 'cup;': '\u222a', - 'cupbrcap;': '\u2a48', - 'CupCap;': '\u224d', - 'cupcap;': '\u2a46', - 'cupcup;': '\u2a4a', - 'cupdot;': '\u228d', - 'cupor;': '\u2a45', - 'cups;': '\u222a\ufe00', - 'curarr;': '\u21b7', - 'curarrm;': '\u293c', - 'curlyeqprec;': '\u22de', - 'curlyeqsucc;': '\u22df', - 'curlyvee;': '\u22ce', - 'curlywedge;': '\u22cf', - 'curren': '\xa4', - 'curren;': '\xa4', - 'curvearrowleft;': '\u21b6', - 'curvearrowright;': '\u21b7', - 'cuvee;': '\u22ce', - 'cuwed;': '\u22cf', - 'cwconint;': '\u2232', - 'cwint;': '\u2231', - 'cylcty;': '\u232d', - 'Dagger;': '\u2021', - 'dagger;': '\u2020', - 'daleth;': '\u2138', - 'Darr;': '\u21a1', - 'dArr;': '\u21d3', - 'darr;': '\u2193', - 'dash;': '\u2010', - 
'Dashv;': '\u2ae4', - 'dashv;': '\u22a3', - 'dbkarow;': '\u290f', - 'dblac;': '\u02dd', - 'Dcaron;': '\u010e', - 'dcaron;': '\u010f', - 'Dcy;': '\u0414', - 'dcy;': '\u0434', - 'DD;': '\u2145', - 'dd;': '\u2146', - 'ddagger;': '\u2021', - 'ddarr;': '\u21ca', - 'DDotrahd;': '\u2911', - 'ddotseq;': '\u2a77', - 'deg': '\xb0', - 'deg;': '\xb0', - 'Del;': '\u2207', - 'Delta;': '\u0394', - 'delta;': '\u03b4', - 'demptyv;': '\u29b1', - 'dfisht;': '\u297f', - 'Dfr;': '\U0001d507', - 'dfr;': '\U0001d521', - 'dHar;': '\u2965', - 'dharl;': '\u21c3', - 'dharr;': '\u21c2', - 'DiacriticalAcute;': '\xb4', - 'DiacriticalDot;': '\u02d9', - 'DiacriticalDoubleAcute;': '\u02dd', - 'DiacriticalGrave;': '`', - 'DiacriticalTilde;': '\u02dc', - 'diam;': '\u22c4', - 'Diamond;': '\u22c4', - 'diamond;': '\u22c4', - 'diamondsuit;': '\u2666', - 'diams;': '\u2666', - 'die;': '\xa8', - 'DifferentialD;': '\u2146', - 'digamma;': '\u03dd', - 'disin;': '\u22f2', - 'div;': '\xf7', - 'divide': '\xf7', - 'divide;': '\xf7', - 'divideontimes;': '\u22c7', - 'divonx;': '\u22c7', - 'DJcy;': '\u0402', - 'djcy;': '\u0452', - 'dlcorn;': '\u231e', - 'dlcrop;': '\u230d', - 'dollar;': '$', - 'Dopf;': '\U0001d53b', - 'dopf;': '\U0001d555', - 'Dot;': '\xa8', - 'dot;': '\u02d9', - 'DotDot;': '\u20dc', - 'doteq;': '\u2250', - 'doteqdot;': '\u2251', - 'DotEqual;': '\u2250', - 'dotminus;': '\u2238', - 'dotplus;': '\u2214', - 'dotsquare;': '\u22a1', - 'doublebarwedge;': '\u2306', - 'DoubleContourIntegral;': '\u222f', - 'DoubleDot;': '\xa8', - 'DoubleDownArrow;': '\u21d3', - 'DoubleLeftArrow;': '\u21d0', - 'DoubleLeftRightArrow;': '\u21d4', - 'DoubleLeftTee;': '\u2ae4', - 'DoubleLongLeftArrow;': '\u27f8', - 'DoubleLongLeftRightArrow;': '\u27fa', - 'DoubleLongRightArrow;': '\u27f9', - 'DoubleRightArrow;': '\u21d2', - 'DoubleRightTee;': '\u22a8', - 'DoubleUpArrow;': '\u21d1', - 'DoubleUpDownArrow;': '\u21d5', - 'DoubleVerticalBar;': '\u2225', - 'DownArrow;': '\u2193', - 'Downarrow;': '\u21d3', - 'downarrow;': '\u2193', - 'DownArrowBar;': '\u2913', - 'DownArrowUpArrow;': '\u21f5', - 'DownBreve;': '\u0311', - 'downdownarrows;': '\u21ca', - 'downharpoonleft;': '\u21c3', - 'downharpoonright;': '\u21c2', - 'DownLeftRightVector;': '\u2950', - 'DownLeftTeeVector;': '\u295e', - 'DownLeftVector;': '\u21bd', - 'DownLeftVectorBar;': '\u2956', - 'DownRightTeeVector;': '\u295f', - 'DownRightVector;': '\u21c1', - 'DownRightVectorBar;': '\u2957', - 'DownTee;': '\u22a4', - 'DownTeeArrow;': '\u21a7', - 'drbkarow;': '\u2910', - 'drcorn;': '\u231f', - 'drcrop;': '\u230c', - 'Dscr;': '\U0001d49f', - 'dscr;': '\U0001d4b9', - 'DScy;': '\u0405', - 'dscy;': '\u0455', - 'dsol;': '\u29f6', - 'Dstrok;': '\u0110', - 'dstrok;': '\u0111', - 'dtdot;': '\u22f1', - 'dtri;': '\u25bf', - 'dtrif;': '\u25be', - 'duarr;': '\u21f5', - 'duhar;': '\u296f', - 'dwangle;': '\u29a6', - 'DZcy;': '\u040f', - 'dzcy;': '\u045f', - 'dzigrarr;': '\u27ff', - 'Eacute': '\xc9', - 'eacute': '\xe9', - 'Eacute;': '\xc9', - 'eacute;': '\xe9', - 'easter;': '\u2a6e', - 'Ecaron;': '\u011a', - 'ecaron;': '\u011b', - 'ecir;': '\u2256', - 'Ecirc': '\xca', - 'ecirc': '\xea', - 'Ecirc;': '\xca', - 'ecirc;': '\xea', - 'ecolon;': '\u2255', - 'Ecy;': '\u042d', - 'ecy;': '\u044d', - 'eDDot;': '\u2a77', - 'Edot;': '\u0116', - 'eDot;': '\u2251', - 'edot;': '\u0117', - 'ee;': '\u2147', - 'efDot;': '\u2252', - 'Efr;': '\U0001d508', - 'efr;': '\U0001d522', - 'eg;': '\u2a9a', - 'Egrave': '\xc8', - 'egrave': '\xe8', - 'Egrave;': '\xc8', - 'egrave;': '\xe8', - 'egs;': '\u2a96', - 'egsdot;': '\u2a98', - 'el;': '\u2a99', - 
'Element;': '\u2208', - 'elinters;': '\u23e7', - 'ell;': '\u2113', - 'els;': '\u2a95', - 'elsdot;': '\u2a97', - 'Emacr;': '\u0112', - 'emacr;': '\u0113', - 'empty;': '\u2205', - 'emptyset;': '\u2205', - 'EmptySmallSquare;': '\u25fb', - 'emptyv;': '\u2205', - 'EmptyVerySmallSquare;': '\u25ab', - 'emsp13;': '\u2004', - 'emsp14;': '\u2005', - 'emsp;': '\u2003', - 'ENG;': '\u014a', - 'eng;': '\u014b', - 'ensp;': '\u2002', - 'Eogon;': '\u0118', - 'eogon;': '\u0119', - 'Eopf;': '\U0001d53c', - 'eopf;': '\U0001d556', - 'epar;': '\u22d5', - 'eparsl;': '\u29e3', - 'eplus;': '\u2a71', - 'epsi;': '\u03b5', - 'Epsilon;': '\u0395', - 'epsilon;': '\u03b5', - 'epsiv;': '\u03f5', - 'eqcirc;': '\u2256', - 'eqcolon;': '\u2255', - 'eqsim;': '\u2242', - 'eqslantgtr;': '\u2a96', - 'eqslantless;': '\u2a95', - 'Equal;': '\u2a75', - 'equals;': '=', - 'EqualTilde;': '\u2242', - 'equest;': '\u225f', - 'Equilibrium;': '\u21cc', - 'equiv;': '\u2261', - 'equivDD;': '\u2a78', - 'eqvparsl;': '\u29e5', - 'erarr;': '\u2971', - 'erDot;': '\u2253', - 'Escr;': '\u2130', - 'escr;': '\u212f', - 'esdot;': '\u2250', - 'Esim;': '\u2a73', - 'esim;': '\u2242', - 'Eta;': '\u0397', - 'eta;': '\u03b7', - 'ETH': '\xd0', - 'eth': '\xf0', - 'ETH;': '\xd0', - 'eth;': '\xf0', - 'Euml': '\xcb', - 'euml': '\xeb', - 'Euml;': '\xcb', - 'euml;': '\xeb', - 'euro;': '\u20ac', - 'excl;': '!', - 'exist;': '\u2203', - 'Exists;': '\u2203', - 'expectation;': '\u2130', - 'ExponentialE;': '\u2147', - 'exponentiale;': '\u2147', - 'fallingdotseq;': '\u2252', - 'Fcy;': '\u0424', - 'fcy;': '\u0444', - 'female;': '\u2640', - 'ffilig;': '\ufb03', - 'fflig;': '\ufb00', - 'ffllig;': '\ufb04', - 'Ffr;': '\U0001d509', - 'ffr;': '\U0001d523', - 'filig;': '\ufb01', - 'FilledSmallSquare;': '\u25fc', - 'FilledVerySmallSquare;': '\u25aa', - 'fjlig;': 'fj', - 'flat;': '\u266d', - 'fllig;': '\ufb02', - 'fltns;': '\u25b1', - 'fnof;': '\u0192', - 'Fopf;': '\U0001d53d', - 'fopf;': '\U0001d557', - 'ForAll;': '\u2200', - 'forall;': '\u2200', - 'fork;': '\u22d4', - 'forkv;': '\u2ad9', - 'Fouriertrf;': '\u2131', - 'fpartint;': '\u2a0d', - 'frac12': '\xbd', - 'frac12;': '\xbd', - 'frac13;': '\u2153', - 'frac14': '\xbc', - 'frac14;': '\xbc', - 'frac15;': '\u2155', - 'frac16;': '\u2159', - 'frac18;': '\u215b', - 'frac23;': '\u2154', - 'frac25;': '\u2156', - 'frac34': '\xbe', - 'frac34;': '\xbe', - 'frac35;': '\u2157', - 'frac38;': '\u215c', - 'frac45;': '\u2158', - 'frac56;': '\u215a', - 'frac58;': '\u215d', - 'frac78;': '\u215e', - 'frasl;': '\u2044', - 'frown;': '\u2322', - 'Fscr;': '\u2131', - 'fscr;': '\U0001d4bb', - 'gacute;': '\u01f5', - 'Gamma;': '\u0393', - 'gamma;': '\u03b3', - 'Gammad;': '\u03dc', - 'gammad;': '\u03dd', - 'gap;': '\u2a86', - 'Gbreve;': '\u011e', - 'gbreve;': '\u011f', - 'Gcedil;': '\u0122', - 'Gcirc;': '\u011c', - 'gcirc;': '\u011d', - 'Gcy;': '\u0413', - 'gcy;': '\u0433', - 'Gdot;': '\u0120', - 'gdot;': '\u0121', - 'gE;': '\u2267', - 'ge;': '\u2265', - 'gEl;': '\u2a8c', - 'gel;': '\u22db', - 'geq;': '\u2265', - 'geqq;': '\u2267', - 'geqslant;': '\u2a7e', - 'ges;': '\u2a7e', - 'gescc;': '\u2aa9', - 'gesdot;': '\u2a80', - 'gesdoto;': '\u2a82', - 'gesdotol;': '\u2a84', - 'gesl;': '\u22db\ufe00', - 'gesles;': '\u2a94', - 'Gfr;': '\U0001d50a', - 'gfr;': '\U0001d524', - 'Gg;': '\u22d9', - 'gg;': '\u226b', - 'ggg;': '\u22d9', - 'gimel;': '\u2137', - 'GJcy;': '\u0403', - 'gjcy;': '\u0453', - 'gl;': '\u2277', - 'gla;': '\u2aa5', - 'glE;': '\u2a92', - 'glj;': '\u2aa4', - 'gnap;': '\u2a8a', - 'gnapprox;': '\u2a8a', - 'gnE;': '\u2269', - 'gne;': '\u2a88', - 
'gneq;': '\u2a88', - 'gneqq;': '\u2269', - 'gnsim;': '\u22e7', - 'Gopf;': '\U0001d53e', - 'gopf;': '\U0001d558', - 'grave;': '`', - 'GreaterEqual;': '\u2265', - 'GreaterEqualLess;': '\u22db', - 'GreaterFullEqual;': '\u2267', - 'GreaterGreater;': '\u2aa2', - 'GreaterLess;': '\u2277', - 'GreaterSlantEqual;': '\u2a7e', - 'GreaterTilde;': '\u2273', - 'Gscr;': '\U0001d4a2', - 'gscr;': '\u210a', - 'gsim;': '\u2273', - 'gsime;': '\u2a8e', - 'gsiml;': '\u2a90', - 'GT': '>', - 'gt': '>', - 'GT;': '>', - 'Gt;': '\u226b', - 'gt;': '>', - 'gtcc;': '\u2aa7', - 'gtcir;': '\u2a7a', - 'gtdot;': '\u22d7', - 'gtlPar;': '\u2995', - 'gtquest;': '\u2a7c', - 'gtrapprox;': '\u2a86', - 'gtrarr;': '\u2978', - 'gtrdot;': '\u22d7', - 'gtreqless;': '\u22db', - 'gtreqqless;': '\u2a8c', - 'gtrless;': '\u2277', - 'gtrsim;': '\u2273', - 'gvertneqq;': '\u2269\ufe00', - 'gvnE;': '\u2269\ufe00', - 'Hacek;': '\u02c7', - 'hairsp;': '\u200a', - 'half;': '\xbd', - 'hamilt;': '\u210b', - 'HARDcy;': '\u042a', - 'hardcy;': '\u044a', - 'hArr;': '\u21d4', - 'harr;': '\u2194', - 'harrcir;': '\u2948', - 'harrw;': '\u21ad', - 'Hat;': '^', - 'hbar;': '\u210f', - 'Hcirc;': '\u0124', - 'hcirc;': '\u0125', - 'hearts;': '\u2665', - 'heartsuit;': '\u2665', - 'hellip;': '\u2026', - 'hercon;': '\u22b9', - 'Hfr;': '\u210c', - 'hfr;': '\U0001d525', - 'HilbertSpace;': '\u210b', - 'hksearow;': '\u2925', - 'hkswarow;': '\u2926', - 'hoarr;': '\u21ff', - 'homtht;': '\u223b', - 'hookleftarrow;': '\u21a9', - 'hookrightarrow;': '\u21aa', - 'Hopf;': '\u210d', - 'hopf;': '\U0001d559', - 'horbar;': '\u2015', - 'HorizontalLine;': '\u2500', - 'Hscr;': '\u210b', - 'hscr;': '\U0001d4bd', - 'hslash;': '\u210f', - 'Hstrok;': '\u0126', - 'hstrok;': '\u0127', - 'HumpDownHump;': '\u224e', - 'HumpEqual;': '\u224f', - 'hybull;': '\u2043', - 'hyphen;': '\u2010', - 'Iacute': '\xcd', - 'iacute': '\xed', - 'Iacute;': '\xcd', - 'iacute;': '\xed', - 'ic;': '\u2063', - 'Icirc': '\xce', - 'icirc': '\xee', - 'Icirc;': '\xce', - 'icirc;': '\xee', - 'Icy;': '\u0418', - 'icy;': '\u0438', - 'Idot;': '\u0130', - 'IEcy;': '\u0415', - 'iecy;': '\u0435', - 'iexcl': '\xa1', - 'iexcl;': '\xa1', - 'iff;': '\u21d4', - 'Ifr;': '\u2111', - 'ifr;': '\U0001d526', - 'Igrave': '\xcc', - 'igrave': '\xec', - 'Igrave;': '\xcc', - 'igrave;': '\xec', - 'ii;': '\u2148', - 'iiiint;': '\u2a0c', - 'iiint;': '\u222d', - 'iinfin;': '\u29dc', - 'iiota;': '\u2129', - 'IJlig;': '\u0132', - 'ijlig;': '\u0133', - 'Im;': '\u2111', - 'Imacr;': '\u012a', - 'imacr;': '\u012b', - 'image;': '\u2111', - 'ImaginaryI;': '\u2148', - 'imagline;': '\u2110', - 'imagpart;': '\u2111', - 'imath;': '\u0131', - 'imof;': '\u22b7', - 'imped;': '\u01b5', - 'Implies;': '\u21d2', - 'in;': '\u2208', - 'incare;': '\u2105', - 'infin;': '\u221e', - 'infintie;': '\u29dd', - 'inodot;': '\u0131', - 'Int;': '\u222c', - 'int;': '\u222b', - 'intcal;': '\u22ba', - 'integers;': '\u2124', - 'Integral;': '\u222b', - 'intercal;': '\u22ba', - 'Intersection;': '\u22c2', - 'intlarhk;': '\u2a17', - 'intprod;': '\u2a3c', - 'InvisibleComma;': '\u2063', - 'InvisibleTimes;': '\u2062', - 'IOcy;': '\u0401', - 'iocy;': '\u0451', - 'Iogon;': '\u012e', - 'iogon;': '\u012f', - 'Iopf;': '\U0001d540', - 'iopf;': '\U0001d55a', - 'Iota;': '\u0399', - 'iota;': '\u03b9', - 'iprod;': '\u2a3c', - 'iquest': '\xbf', - 'iquest;': '\xbf', - 'Iscr;': '\u2110', - 'iscr;': '\U0001d4be', - 'isin;': '\u2208', - 'isindot;': '\u22f5', - 'isinE;': '\u22f9', - 'isins;': '\u22f4', - 'isinsv;': '\u22f3', - 'isinv;': '\u2208', - 'it;': '\u2062', - 'Itilde;': '\u0128', - 'itilde;': 
'\u0129', - 'Iukcy;': '\u0406', - 'iukcy;': '\u0456', - 'Iuml': '\xcf', - 'iuml': '\xef', - 'Iuml;': '\xcf', - 'iuml;': '\xef', - 'Jcirc;': '\u0134', - 'jcirc;': '\u0135', - 'Jcy;': '\u0419', - 'jcy;': '\u0439', - 'Jfr;': '\U0001d50d', - 'jfr;': '\U0001d527', - 'jmath;': '\u0237', - 'Jopf;': '\U0001d541', - 'jopf;': '\U0001d55b', - 'Jscr;': '\U0001d4a5', - 'jscr;': '\U0001d4bf', - 'Jsercy;': '\u0408', - 'jsercy;': '\u0458', - 'Jukcy;': '\u0404', - 'jukcy;': '\u0454', - 'Kappa;': '\u039a', - 'kappa;': '\u03ba', - 'kappav;': '\u03f0', - 'Kcedil;': '\u0136', - 'kcedil;': '\u0137', - 'Kcy;': '\u041a', - 'kcy;': '\u043a', - 'Kfr;': '\U0001d50e', - 'kfr;': '\U0001d528', - 'kgreen;': '\u0138', - 'KHcy;': '\u0425', - 'khcy;': '\u0445', - 'KJcy;': '\u040c', - 'kjcy;': '\u045c', - 'Kopf;': '\U0001d542', - 'kopf;': '\U0001d55c', - 'Kscr;': '\U0001d4a6', - 'kscr;': '\U0001d4c0', - 'lAarr;': '\u21da', - 'Lacute;': '\u0139', - 'lacute;': '\u013a', - 'laemptyv;': '\u29b4', - 'lagran;': '\u2112', - 'Lambda;': '\u039b', - 'lambda;': '\u03bb', - 'Lang;': '\u27ea', - 'lang;': '\u27e8', - 'langd;': '\u2991', - 'langle;': '\u27e8', - 'lap;': '\u2a85', - 'Laplacetrf;': '\u2112', - 'laquo': '\xab', - 'laquo;': '\xab', - 'Larr;': '\u219e', - 'lArr;': '\u21d0', - 'larr;': '\u2190', - 'larrb;': '\u21e4', - 'larrbfs;': '\u291f', - 'larrfs;': '\u291d', - 'larrhk;': '\u21a9', - 'larrlp;': '\u21ab', - 'larrpl;': '\u2939', - 'larrsim;': '\u2973', - 'larrtl;': '\u21a2', - 'lat;': '\u2aab', - 'lAtail;': '\u291b', - 'latail;': '\u2919', - 'late;': '\u2aad', - 'lates;': '\u2aad\ufe00', - 'lBarr;': '\u290e', - 'lbarr;': '\u290c', - 'lbbrk;': '\u2772', - 'lbrace;': '{', - 'lbrack;': '[', - 'lbrke;': '\u298b', - 'lbrksld;': '\u298f', - 'lbrkslu;': '\u298d', - 'Lcaron;': '\u013d', - 'lcaron;': '\u013e', - 'Lcedil;': '\u013b', - 'lcedil;': '\u013c', - 'lceil;': '\u2308', - 'lcub;': '{', - 'Lcy;': '\u041b', - 'lcy;': '\u043b', - 'ldca;': '\u2936', - 'ldquo;': '\u201c', - 'ldquor;': '\u201e', - 'ldrdhar;': '\u2967', - 'ldrushar;': '\u294b', - 'ldsh;': '\u21b2', - 'lE;': '\u2266', - 'le;': '\u2264', - 'LeftAngleBracket;': '\u27e8', - 'LeftArrow;': '\u2190', - 'Leftarrow;': '\u21d0', - 'leftarrow;': '\u2190', - 'LeftArrowBar;': '\u21e4', - 'LeftArrowRightArrow;': '\u21c6', - 'leftarrowtail;': '\u21a2', - 'LeftCeiling;': '\u2308', - 'LeftDoubleBracket;': '\u27e6', - 'LeftDownTeeVector;': '\u2961', - 'LeftDownVector;': '\u21c3', - 'LeftDownVectorBar;': '\u2959', - 'LeftFloor;': '\u230a', - 'leftharpoondown;': '\u21bd', - 'leftharpoonup;': '\u21bc', - 'leftleftarrows;': '\u21c7', - 'LeftRightArrow;': '\u2194', - 'Leftrightarrow;': '\u21d4', - 'leftrightarrow;': '\u2194', - 'leftrightarrows;': '\u21c6', - 'leftrightharpoons;': '\u21cb', - 'leftrightsquigarrow;': '\u21ad', - 'LeftRightVector;': '\u294e', - 'LeftTee;': '\u22a3', - 'LeftTeeArrow;': '\u21a4', - 'LeftTeeVector;': '\u295a', - 'leftthreetimes;': '\u22cb', - 'LeftTriangle;': '\u22b2', - 'LeftTriangleBar;': '\u29cf', - 'LeftTriangleEqual;': '\u22b4', - 'LeftUpDownVector;': '\u2951', - 'LeftUpTeeVector;': '\u2960', - 'LeftUpVector;': '\u21bf', - 'LeftUpVectorBar;': '\u2958', - 'LeftVector;': '\u21bc', - 'LeftVectorBar;': '\u2952', - 'lEg;': '\u2a8b', - 'leg;': '\u22da', - 'leq;': '\u2264', - 'leqq;': '\u2266', - 'leqslant;': '\u2a7d', - 'les;': '\u2a7d', - 'lescc;': '\u2aa8', - 'lesdot;': '\u2a7f', - 'lesdoto;': '\u2a81', - 'lesdotor;': '\u2a83', - 'lesg;': '\u22da\ufe00', - 'lesges;': '\u2a93', - 'lessapprox;': '\u2a85', - 'lessdot;': '\u22d6', - 'lesseqgtr;': '\u22da', - 
'lesseqqgtr;': '\u2a8b', - 'LessEqualGreater;': '\u22da', - 'LessFullEqual;': '\u2266', - 'LessGreater;': '\u2276', - 'lessgtr;': '\u2276', - 'LessLess;': '\u2aa1', - 'lesssim;': '\u2272', - 'LessSlantEqual;': '\u2a7d', - 'LessTilde;': '\u2272', - 'lfisht;': '\u297c', - 'lfloor;': '\u230a', - 'Lfr;': '\U0001d50f', - 'lfr;': '\U0001d529', - 'lg;': '\u2276', - 'lgE;': '\u2a91', - 'lHar;': '\u2962', - 'lhard;': '\u21bd', - 'lharu;': '\u21bc', - 'lharul;': '\u296a', - 'lhblk;': '\u2584', - 'LJcy;': '\u0409', - 'ljcy;': '\u0459', - 'Ll;': '\u22d8', - 'll;': '\u226a', - 'llarr;': '\u21c7', - 'llcorner;': '\u231e', - 'Lleftarrow;': '\u21da', - 'llhard;': '\u296b', - 'lltri;': '\u25fa', - 'Lmidot;': '\u013f', - 'lmidot;': '\u0140', - 'lmoust;': '\u23b0', - 'lmoustache;': '\u23b0', - 'lnap;': '\u2a89', - 'lnapprox;': '\u2a89', - 'lnE;': '\u2268', - 'lne;': '\u2a87', - 'lneq;': '\u2a87', - 'lneqq;': '\u2268', - 'lnsim;': '\u22e6', - 'loang;': '\u27ec', - 'loarr;': '\u21fd', - 'lobrk;': '\u27e6', - 'LongLeftArrow;': '\u27f5', - 'Longleftarrow;': '\u27f8', - 'longleftarrow;': '\u27f5', - 'LongLeftRightArrow;': '\u27f7', - 'Longleftrightarrow;': '\u27fa', - 'longleftrightarrow;': '\u27f7', - 'longmapsto;': '\u27fc', - 'LongRightArrow;': '\u27f6', - 'Longrightarrow;': '\u27f9', - 'longrightarrow;': '\u27f6', - 'looparrowleft;': '\u21ab', - 'looparrowright;': '\u21ac', - 'lopar;': '\u2985', - 'Lopf;': '\U0001d543', - 'lopf;': '\U0001d55d', - 'loplus;': '\u2a2d', - 'lotimes;': '\u2a34', - 'lowast;': '\u2217', - 'lowbar;': '_', - 'LowerLeftArrow;': '\u2199', - 'LowerRightArrow;': '\u2198', - 'loz;': '\u25ca', - 'lozenge;': '\u25ca', - 'lozf;': '\u29eb', - 'lpar;': '(', - 'lparlt;': '\u2993', - 'lrarr;': '\u21c6', - 'lrcorner;': '\u231f', - 'lrhar;': '\u21cb', - 'lrhard;': '\u296d', - 'lrm;': '\u200e', - 'lrtri;': '\u22bf', - 'lsaquo;': '\u2039', - 'Lscr;': '\u2112', - 'lscr;': '\U0001d4c1', - 'Lsh;': '\u21b0', - 'lsh;': '\u21b0', - 'lsim;': '\u2272', - 'lsime;': '\u2a8d', - 'lsimg;': '\u2a8f', - 'lsqb;': '[', - 'lsquo;': '\u2018', - 'lsquor;': '\u201a', - 'Lstrok;': '\u0141', - 'lstrok;': '\u0142', - 'LT': '<', - 'lt': '<', - 'LT;': '<', - 'Lt;': '\u226a', - 'lt;': '<', - 'ltcc;': '\u2aa6', - 'ltcir;': '\u2a79', - 'ltdot;': '\u22d6', - 'lthree;': '\u22cb', - 'ltimes;': '\u22c9', - 'ltlarr;': '\u2976', - 'ltquest;': '\u2a7b', - 'ltri;': '\u25c3', - 'ltrie;': '\u22b4', - 'ltrif;': '\u25c2', - 'ltrPar;': '\u2996', - 'lurdshar;': '\u294a', - 'luruhar;': '\u2966', - 'lvertneqq;': '\u2268\ufe00', - 'lvnE;': '\u2268\ufe00', - 'macr': '\xaf', - 'macr;': '\xaf', - 'male;': '\u2642', - 'malt;': '\u2720', - 'maltese;': '\u2720', - 'Map;': '\u2905', - 'map;': '\u21a6', - 'mapsto;': '\u21a6', - 'mapstodown;': '\u21a7', - 'mapstoleft;': '\u21a4', - 'mapstoup;': '\u21a5', - 'marker;': '\u25ae', - 'mcomma;': '\u2a29', - 'Mcy;': '\u041c', - 'mcy;': '\u043c', - 'mdash;': '\u2014', - 'mDDot;': '\u223a', - 'measuredangle;': '\u2221', - 'MediumSpace;': '\u205f', - 'Mellintrf;': '\u2133', - 'Mfr;': '\U0001d510', - 'mfr;': '\U0001d52a', - 'mho;': '\u2127', - 'micro': '\xb5', - 'micro;': '\xb5', - 'mid;': '\u2223', - 'midast;': '*', - 'midcir;': '\u2af0', - 'middot': '\xb7', - 'middot;': '\xb7', - 'minus;': '\u2212', - 'minusb;': '\u229f', - 'minusd;': '\u2238', - 'minusdu;': '\u2a2a', - 'MinusPlus;': '\u2213', - 'mlcp;': '\u2adb', - 'mldr;': '\u2026', - 'mnplus;': '\u2213', - 'models;': '\u22a7', - 'Mopf;': '\U0001d544', - 'mopf;': '\U0001d55e', - 'mp;': '\u2213', - 'Mscr;': '\u2133', - 'mscr;': '\U0001d4c2', - 'mstpos;': 
'\u223e', - 'Mu;': '\u039c', - 'mu;': '\u03bc', - 'multimap;': '\u22b8', - 'mumap;': '\u22b8', - 'nabla;': '\u2207', - 'Nacute;': '\u0143', - 'nacute;': '\u0144', - 'nang;': '\u2220\u20d2', - 'nap;': '\u2249', - 'napE;': '\u2a70\u0338', - 'napid;': '\u224b\u0338', - 'napos;': '\u0149', - 'napprox;': '\u2249', - 'natur;': '\u266e', - 'natural;': '\u266e', - 'naturals;': '\u2115', - 'nbsp': '\xa0', - 'nbsp;': '\xa0', - 'nbump;': '\u224e\u0338', - 'nbumpe;': '\u224f\u0338', - 'ncap;': '\u2a43', - 'Ncaron;': '\u0147', - 'ncaron;': '\u0148', - 'Ncedil;': '\u0145', - 'ncedil;': '\u0146', - 'ncong;': '\u2247', - 'ncongdot;': '\u2a6d\u0338', - 'ncup;': '\u2a42', - 'Ncy;': '\u041d', - 'ncy;': '\u043d', - 'ndash;': '\u2013', - 'ne;': '\u2260', - 'nearhk;': '\u2924', - 'neArr;': '\u21d7', - 'nearr;': '\u2197', - 'nearrow;': '\u2197', - 'nedot;': '\u2250\u0338', - 'NegativeMediumSpace;': '\u200b', - 'NegativeThickSpace;': '\u200b', - 'NegativeThinSpace;': '\u200b', - 'NegativeVeryThinSpace;': '\u200b', - 'nequiv;': '\u2262', - 'nesear;': '\u2928', - 'nesim;': '\u2242\u0338', - 'NestedGreaterGreater;': '\u226b', - 'NestedLessLess;': '\u226a', - 'NewLine;': '\n', - 'nexist;': '\u2204', - 'nexists;': '\u2204', - 'Nfr;': '\U0001d511', - 'nfr;': '\U0001d52b', - 'ngE;': '\u2267\u0338', - 'nge;': '\u2271', - 'ngeq;': '\u2271', - 'ngeqq;': '\u2267\u0338', - 'ngeqslant;': '\u2a7e\u0338', - 'nges;': '\u2a7e\u0338', - 'nGg;': '\u22d9\u0338', - 'ngsim;': '\u2275', - 'nGt;': '\u226b\u20d2', - 'ngt;': '\u226f', - 'ngtr;': '\u226f', - 'nGtv;': '\u226b\u0338', - 'nhArr;': '\u21ce', - 'nharr;': '\u21ae', - 'nhpar;': '\u2af2', - 'ni;': '\u220b', - 'nis;': '\u22fc', - 'nisd;': '\u22fa', - 'niv;': '\u220b', - 'NJcy;': '\u040a', - 'njcy;': '\u045a', - 'nlArr;': '\u21cd', - 'nlarr;': '\u219a', - 'nldr;': '\u2025', - 'nlE;': '\u2266\u0338', - 'nle;': '\u2270', - 'nLeftarrow;': '\u21cd', - 'nleftarrow;': '\u219a', - 'nLeftrightarrow;': '\u21ce', - 'nleftrightarrow;': '\u21ae', - 'nleq;': '\u2270', - 'nleqq;': '\u2266\u0338', - 'nleqslant;': '\u2a7d\u0338', - 'nles;': '\u2a7d\u0338', - 'nless;': '\u226e', - 'nLl;': '\u22d8\u0338', - 'nlsim;': '\u2274', - 'nLt;': '\u226a\u20d2', - 'nlt;': '\u226e', - 'nltri;': '\u22ea', - 'nltrie;': '\u22ec', - 'nLtv;': '\u226a\u0338', - 'nmid;': '\u2224', - 'NoBreak;': '\u2060', - 'NonBreakingSpace;': '\xa0', - 'Nopf;': '\u2115', - 'nopf;': '\U0001d55f', - 'not': '\xac', - 'Not;': '\u2aec', - 'not;': '\xac', - 'NotCongruent;': '\u2262', - 'NotCupCap;': '\u226d', - 'NotDoubleVerticalBar;': '\u2226', - 'NotElement;': '\u2209', - 'NotEqual;': '\u2260', - 'NotEqualTilde;': '\u2242\u0338', - 'NotExists;': '\u2204', - 'NotGreater;': '\u226f', - 'NotGreaterEqual;': '\u2271', - 'NotGreaterFullEqual;': '\u2267\u0338', - 'NotGreaterGreater;': '\u226b\u0338', - 'NotGreaterLess;': '\u2279', - 'NotGreaterSlantEqual;': '\u2a7e\u0338', - 'NotGreaterTilde;': '\u2275', - 'NotHumpDownHump;': '\u224e\u0338', - 'NotHumpEqual;': '\u224f\u0338', - 'notin;': '\u2209', - 'notindot;': '\u22f5\u0338', - 'notinE;': '\u22f9\u0338', - 'notinva;': '\u2209', - 'notinvb;': '\u22f7', - 'notinvc;': '\u22f6', - 'NotLeftTriangle;': '\u22ea', - 'NotLeftTriangleBar;': '\u29cf\u0338', - 'NotLeftTriangleEqual;': '\u22ec', - 'NotLess;': '\u226e', - 'NotLessEqual;': '\u2270', - 'NotLessGreater;': '\u2278', - 'NotLessLess;': '\u226a\u0338', - 'NotLessSlantEqual;': '\u2a7d\u0338', - 'NotLessTilde;': '\u2274', - 'NotNestedGreaterGreater;': '\u2aa2\u0338', - 'NotNestedLessLess;': '\u2aa1\u0338', - 'notni;': '\u220c', - 'notniva;': 
'\u220c', - 'notnivb;': '\u22fe', - 'notnivc;': '\u22fd', - 'NotPrecedes;': '\u2280', - 'NotPrecedesEqual;': '\u2aaf\u0338', - 'NotPrecedesSlantEqual;': '\u22e0', - 'NotReverseElement;': '\u220c', - 'NotRightTriangle;': '\u22eb', - 'NotRightTriangleBar;': '\u29d0\u0338', - 'NotRightTriangleEqual;': '\u22ed', - 'NotSquareSubset;': '\u228f\u0338', - 'NotSquareSubsetEqual;': '\u22e2', - 'NotSquareSuperset;': '\u2290\u0338', - 'NotSquareSupersetEqual;': '\u22e3', - 'NotSubset;': '\u2282\u20d2', - 'NotSubsetEqual;': '\u2288', - 'NotSucceeds;': '\u2281', - 'NotSucceedsEqual;': '\u2ab0\u0338', - 'NotSucceedsSlantEqual;': '\u22e1', - 'NotSucceedsTilde;': '\u227f\u0338', - 'NotSuperset;': '\u2283\u20d2', - 'NotSupersetEqual;': '\u2289', - 'NotTilde;': '\u2241', - 'NotTildeEqual;': '\u2244', - 'NotTildeFullEqual;': '\u2247', - 'NotTildeTilde;': '\u2249', - 'NotVerticalBar;': '\u2224', - 'npar;': '\u2226', - 'nparallel;': '\u2226', - 'nparsl;': '\u2afd\u20e5', - 'npart;': '\u2202\u0338', - 'npolint;': '\u2a14', - 'npr;': '\u2280', - 'nprcue;': '\u22e0', - 'npre;': '\u2aaf\u0338', - 'nprec;': '\u2280', - 'npreceq;': '\u2aaf\u0338', - 'nrArr;': '\u21cf', - 'nrarr;': '\u219b', - 'nrarrc;': '\u2933\u0338', - 'nrarrw;': '\u219d\u0338', - 'nRightarrow;': '\u21cf', - 'nrightarrow;': '\u219b', - 'nrtri;': '\u22eb', - 'nrtrie;': '\u22ed', - 'nsc;': '\u2281', - 'nsccue;': '\u22e1', - 'nsce;': '\u2ab0\u0338', - 'Nscr;': '\U0001d4a9', - 'nscr;': '\U0001d4c3', - 'nshortmid;': '\u2224', - 'nshortparallel;': '\u2226', - 'nsim;': '\u2241', - 'nsime;': '\u2244', - 'nsimeq;': '\u2244', - 'nsmid;': '\u2224', - 'nspar;': '\u2226', - 'nsqsube;': '\u22e2', - 'nsqsupe;': '\u22e3', - 'nsub;': '\u2284', - 'nsubE;': '\u2ac5\u0338', - 'nsube;': '\u2288', - 'nsubset;': '\u2282\u20d2', - 'nsubseteq;': '\u2288', - 'nsubseteqq;': '\u2ac5\u0338', - 'nsucc;': '\u2281', - 'nsucceq;': '\u2ab0\u0338', - 'nsup;': '\u2285', - 'nsupE;': '\u2ac6\u0338', - 'nsupe;': '\u2289', - 'nsupset;': '\u2283\u20d2', - 'nsupseteq;': '\u2289', - 'nsupseteqq;': '\u2ac6\u0338', - 'ntgl;': '\u2279', - 'Ntilde': '\xd1', - 'ntilde': '\xf1', - 'Ntilde;': '\xd1', - 'ntilde;': '\xf1', - 'ntlg;': '\u2278', - 'ntriangleleft;': '\u22ea', - 'ntrianglelefteq;': '\u22ec', - 'ntriangleright;': '\u22eb', - 'ntrianglerighteq;': '\u22ed', - 'Nu;': '\u039d', - 'nu;': '\u03bd', - 'num;': '#', - 'numero;': '\u2116', - 'numsp;': '\u2007', - 'nvap;': '\u224d\u20d2', - 'nVDash;': '\u22af', - 'nVdash;': '\u22ae', - 'nvDash;': '\u22ad', - 'nvdash;': '\u22ac', - 'nvge;': '\u2265\u20d2', - 'nvgt;': '>\u20d2', - 'nvHarr;': '\u2904', - 'nvinfin;': '\u29de', - 'nvlArr;': '\u2902', - 'nvle;': '\u2264\u20d2', - 'nvlt;': '<\u20d2', - 'nvltrie;': '\u22b4\u20d2', - 'nvrArr;': '\u2903', - 'nvrtrie;': '\u22b5\u20d2', - 'nvsim;': '\u223c\u20d2', - 'nwarhk;': '\u2923', - 'nwArr;': '\u21d6', - 'nwarr;': '\u2196', - 'nwarrow;': '\u2196', - 'nwnear;': '\u2927', - 'Oacute': '\xd3', - 'oacute': '\xf3', - 'Oacute;': '\xd3', - 'oacute;': '\xf3', - 'oast;': '\u229b', - 'ocir;': '\u229a', - 'Ocirc': '\xd4', - 'ocirc': '\xf4', - 'Ocirc;': '\xd4', - 'ocirc;': '\xf4', - 'Ocy;': '\u041e', - 'ocy;': '\u043e', - 'odash;': '\u229d', - 'Odblac;': '\u0150', - 'odblac;': '\u0151', - 'odiv;': '\u2a38', - 'odot;': '\u2299', - 'odsold;': '\u29bc', - 'OElig;': '\u0152', - 'oelig;': '\u0153', - 'ofcir;': '\u29bf', - 'Ofr;': '\U0001d512', - 'ofr;': '\U0001d52c', - 'ogon;': '\u02db', - 'Ograve': '\xd2', - 'ograve': '\xf2', - 'Ograve;': '\xd2', - 'ograve;': '\xf2', - 'ogt;': '\u29c1', - 'ohbar;': '\u29b5', - 'ohm;': 
'\u03a9', - 'oint;': '\u222e', - 'olarr;': '\u21ba', - 'olcir;': '\u29be', - 'olcross;': '\u29bb', - 'oline;': '\u203e', - 'olt;': '\u29c0', - 'Omacr;': '\u014c', - 'omacr;': '\u014d', - 'Omega;': '\u03a9', - 'omega;': '\u03c9', - 'Omicron;': '\u039f', - 'omicron;': '\u03bf', - 'omid;': '\u29b6', - 'ominus;': '\u2296', - 'Oopf;': '\U0001d546', - 'oopf;': '\U0001d560', - 'opar;': '\u29b7', - 'OpenCurlyDoubleQuote;': '\u201c', - 'OpenCurlyQuote;': '\u2018', - 'operp;': '\u29b9', - 'oplus;': '\u2295', - 'Or;': '\u2a54', - 'or;': '\u2228', - 'orarr;': '\u21bb', - 'ord;': '\u2a5d', - 'order;': '\u2134', - 'orderof;': '\u2134', - 'ordf': '\xaa', - 'ordf;': '\xaa', - 'ordm': '\xba', - 'ordm;': '\xba', - 'origof;': '\u22b6', - 'oror;': '\u2a56', - 'orslope;': '\u2a57', - 'orv;': '\u2a5b', - 'oS;': '\u24c8', - 'Oscr;': '\U0001d4aa', - 'oscr;': '\u2134', - 'Oslash': '\xd8', - 'oslash': '\xf8', - 'Oslash;': '\xd8', - 'oslash;': '\xf8', - 'osol;': '\u2298', - 'Otilde': '\xd5', - 'otilde': '\xf5', - 'Otilde;': '\xd5', - 'otilde;': '\xf5', - 'Otimes;': '\u2a37', - 'otimes;': '\u2297', - 'otimesas;': '\u2a36', - 'Ouml': '\xd6', - 'ouml': '\xf6', - 'Ouml;': '\xd6', - 'ouml;': '\xf6', - 'ovbar;': '\u233d', - 'OverBar;': '\u203e', - 'OverBrace;': '\u23de', - 'OverBracket;': '\u23b4', - 'OverParenthesis;': '\u23dc', - 'par;': '\u2225', - 'para': '\xb6', - 'para;': '\xb6', - 'parallel;': '\u2225', - 'parsim;': '\u2af3', - 'parsl;': '\u2afd', - 'part;': '\u2202', - 'PartialD;': '\u2202', - 'Pcy;': '\u041f', - 'pcy;': '\u043f', - 'percnt;': '%', - 'period;': '.', - 'permil;': '\u2030', - 'perp;': '\u22a5', - 'pertenk;': '\u2031', - 'Pfr;': '\U0001d513', - 'pfr;': '\U0001d52d', - 'Phi;': '\u03a6', - 'phi;': '\u03c6', - 'phiv;': '\u03d5', - 'phmmat;': '\u2133', - 'phone;': '\u260e', - 'Pi;': '\u03a0', - 'pi;': '\u03c0', - 'pitchfork;': '\u22d4', - 'piv;': '\u03d6', - 'planck;': '\u210f', - 'planckh;': '\u210e', - 'plankv;': '\u210f', - 'plus;': '+', - 'plusacir;': '\u2a23', - 'plusb;': '\u229e', - 'pluscir;': '\u2a22', - 'plusdo;': '\u2214', - 'plusdu;': '\u2a25', - 'pluse;': '\u2a72', - 'PlusMinus;': '\xb1', - 'plusmn': '\xb1', - 'plusmn;': '\xb1', - 'plussim;': '\u2a26', - 'plustwo;': '\u2a27', - 'pm;': '\xb1', - 'Poincareplane;': '\u210c', - 'pointint;': '\u2a15', - 'Popf;': '\u2119', - 'popf;': '\U0001d561', - 'pound': '\xa3', - 'pound;': '\xa3', - 'Pr;': '\u2abb', - 'pr;': '\u227a', - 'prap;': '\u2ab7', - 'prcue;': '\u227c', - 'prE;': '\u2ab3', - 'pre;': '\u2aaf', - 'prec;': '\u227a', - 'precapprox;': '\u2ab7', - 'preccurlyeq;': '\u227c', - 'Precedes;': '\u227a', - 'PrecedesEqual;': '\u2aaf', - 'PrecedesSlantEqual;': '\u227c', - 'PrecedesTilde;': '\u227e', - 'preceq;': '\u2aaf', - 'precnapprox;': '\u2ab9', - 'precneqq;': '\u2ab5', - 'precnsim;': '\u22e8', - 'precsim;': '\u227e', - 'Prime;': '\u2033', - 'prime;': '\u2032', - 'primes;': '\u2119', - 'prnap;': '\u2ab9', - 'prnE;': '\u2ab5', - 'prnsim;': '\u22e8', - 'prod;': '\u220f', - 'Product;': '\u220f', - 'profalar;': '\u232e', - 'profline;': '\u2312', - 'profsurf;': '\u2313', - 'prop;': '\u221d', - 'Proportion;': '\u2237', - 'Proportional;': '\u221d', - 'propto;': '\u221d', - 'prsim;': '\u227e', - 'prurel;': '\u22b0', - 'Pscr;': '\U0001d4ab', - 'pscr;': '\U0001d4c5', - 'Psi;': '\u03a8', - 'psi;': '\u03c8', - 'puncsp;': '\u2008', - 'Qfr;': '\U0001d514', - 'qfr;': '\U0001d52e', - 'qint;': '\u2a0c', - 'Qopf;': '\u211a', - 'qopf;': '\U0001d562', - 'qprime;': '\u2057', - 'Qscr;': '\U0001d4ac', - 'qscr;': '\U0001d4c6', - 'quaternions;': '\u210d', - 'quatint;': 
'\u2a16', - 'quest;': '?', - 'questeq;': '\u225f', - 'QUOT': '"', - 'quot': '"', - 'QUOT;': '"', - 'quot;': '"', - 'rAarr;': '\u21db', - 'race;': '\u223d\u0331', - 'Racute;': '\u0154', - 'racute;': '\u0155', - 'radic;': '\u221a', - 'raemptyv;': '\u29b3', - 'Rang;': '\u27eb', - 'rang;': '\u27e9', - 'rangd;': '\u2992', - 'range;': '\u29a5', - 'rangle;': '\u27e9', - 'raquo': '\xbb', - 'raquo;': '\xbb', - 'Rarr;': '\u21a0', - 'rArr;': '\u21d2', - 'rarr;': '\u2192', - 'rarrap;': '\u2975', - 'rarrb;': '\u21e5', - 'rarrbfs;': '\u2920', - 'rarrc;': '\u2933', - 'rarrfs;': '\u291e', - 'rarrhk;': '\u21aa', - 'rarrlp;': '\u21ac', - 'rarrpl;': '\u2945', - 'rarrsim;': '\u2974', - 'Rarrtl;': '\u2916', - 'rarrtl;': '\u21a3', - 'rarrw;': '\u219d', - 'rAtail;': '\u291c', - 'ratail;': '\u291a', - 'ratio;': '\u2236', - 'rationals;': '\u211a', - 'RBarr;': '\u2910', - 'rBarr;': '\u290f', - 'rbarr;': '\u290d', - 'rbbrk;': '\u2773', - 'rbrace;': '}', - 'rbrack;': ']', - 'rbrke;': '\u298c', - 'rbrksld;': '\u298e', - 'rbrkslu;': '\u2990', - 'Rcaron;': '\u0158', - 'rcaron;': '\u0159', - 'Rcedil;': '\u0156', - 'rcedil;': '\u0157', - 'rceil;': '\u2309', - 'rcub;': '}', - 'Rcy;': '\u0420', - 'rcy;': '\u0440', - 'rdca;': '\u2937', - 'rdldhar;': '\u2969', - 'rdquo;': '\u201d', - 'rdquor;': '\u201d', - 'rdsh;': '\u21b3', - 'Re;': '\u211c', - 'real;': '\u211c', - 'realine;': '\u211b', - 'realpart;': '\u211c', - 'reals;': '\u211d', - 'rect;': '\u25ad', - 'REG': '\xae', - 'reg': '\xae', - 'REG;': '\xae', - 'reg;': '\xae', - 'ReverseElement;': '\u220b', - 'ReverseEquilibrium;': '\u21cb', - 'ReverseUpEquilibrium;': '\u296f', - 'rfisht;': '\u297d', - 'rfloor;': '\u230b', - 'Rfr;': '\u211c', - 'rfr;': '\U0001d52f', - 'rHar;': '\u2964', - 'rhard;': '\u21c1', - 'rharu;': '\u21c0', - 'rharul;': '\u296c', - 'Rho;': '\u03a1', - 'rho;': '\u03c1', - 'rhov;': '\u03f1', - 'RightAngleBracket;': '\u27e9', - 'RightArrow;': '\u2192', - 'Rightarrow;': '\u21d2', - 'rightarrow;': '\u2192', - 'RightArrowBar;': '\u21e5', - 'RightArrowLeftArrow;': '\u21c4', - 'rightarrowtail;': '\u21a3', - 'RightCeiling;': '\u2309', - 'RightDoubleBracket;': '\u27e7', - 'RightDownTeeVector;': '\u295d', - 'RightDownVector;': '\u21c2', - 'RightDownVectorBar;': '\u2955', - 'RightFloor;': '\u230b', - 'rightharpoondown;': '\u21c1', - 'rightharpoonup;': '\u21c0', - 'rightleftarrows;': '\u21c4', - 'rightleftharpoons;': '\u21cc', - 'rightrightarrows;': '\u21c9', - 'rightsquigarrow;': '\u219d', - 'RightTee;': '\u22a2', - 'RightTeeArrow;': '\u21a6', - 'RightTeeVector;': '\u295b', - 'rightthreetimes;': '\u22cc', - 'RightTriangle;': '\u22b3', - 'RightTriangleBar;': '\u29d0', - 'RightTriangleEqual;': '\u22b5', - 'RightUpDownVector;': '\u294f', - 'RightUpTeeVector;': '\u295c', - 'RightUpVector;': '\u21be', - 'RightUpVectorBar;': '\u2954', - 'RightVector;': '\u21c0', - 'RightVectorBar;': '\u2953', - 'ring;': '\u02da', - 'risingdotseq;': '\u2253', - 'rlarr;': '\u21c4', - 'rlhar;': '\u21cc', - 'rlm;': '\u200f', - 'rmoust;': '\u23b1', - 'rmoustache;': '\u23b1', - 'rnmid;': '\u2aee', - 'roang;': '\u27ed', - 'roarr;': '\u21fe', - 'robrk;': '\u27e7', - 'ropar;': '\u2986', - 'Ropf;': '\u211d', - 'ropf;': '\U0001d563', - 'roplus;': '\u2a2e', - 'rotimes;': '\u2a35', - 'RoundImplies;': '\u2970', - 'rpar;': ')', - 'rpargt;': '\u2994', - 'rppolint;': '\u2a12', - 'rrarr;': '\u21c9', - 'Rrightarrow;': '\u21db', - 'rsaquo;': '\u203a', - 'Rscr;': '\u211b', - 'rscr;': '\U0001d4c7', - 'Rsh;': '\u21b1', - 'rsh;': '\u21b1', - 'rsqb;': ']', - 'rsquo;': '\u2019', - 'rsquor;': '\u2019', - 'rthree;': 
'\u22cc', - 'rtimes;': '\u22ca', - 'rtri;': '\u25b9', - 'rtrie;': '\u22b5', - 'rtrif;': '\u25b8', - 'rtriltri;': '\u29ce', - 'RuleDelayed;': '\u29f4', - 'ruluhar;': '\u2968', - 'rx;': '\u211e', - 'Sacute;': '\u015a', - 'sacute;': '\u015b', - 'sbquo;': '\u201a', - 'Sc;': '\u2abc', - 'sc;': '\u227b', - 'scap;': '\u2ab8', - 'Scaron;': '\u0160', - 'scaron;': '\u0161', - 'sccue;': '\u227d', - 'scE;': '\u2ab4', - 'sce;': '\u2ab0', - 'Scedil;': '\u015e', - 'scedil;': '\u015f', - 'Scirc;': '\u015c', - 'scirc;': '\u015d', - 'scnap;': '\u2aba', - 'scnE;': '\u2ab6', - 'scnsim;': '\u22e9', - 'scpolint;': '\u2a13', - 'scsim;': '\u227f', - 'Scy;': '\u0421', - 'scy;': '\u0441', - 'sdot;': '\u22c5', - 'sdotb;': '\u22a1', - 'sdote;': '\u2a66', - 'searhk;': '\u2925', - 'seArr;': '\u21d8', - 'searr;': '\u2198', - 'searrow;': '\u2198', - 'sect': '\xa7', - 'sect;': '\xa7', - 'semi;': ';', - 'seswar;': '\u2929', - 'setminus;': '\u2216', - 'setmn;': '\u2216', - 'sext;': '\u2736', - 'Sfr;': '\U0001d516', - 'sfr;': '\U0001d530', - 'sfrown;': '\u2322', - 'sharp;': '\u266f', - 'SHCHcy;': '\u0429', - 'shchcy;': '\u0449', - 'SHcy;': '\u0428', - 'shcy;': '\u0448', - 'ShortDownArrow;': '\u2193', - 'ShortLeftArrow;': '\u2190', - 'shortmid;': '\u2223', - 'shortparallel;': '\u2225', - 'ShortRightArrow;': '\u2192', - 'ShortUpArrow;': '\u2191', - 'shy': '\xad', - 'shy;': '\xad', - 'Sigma;': '\u03a3', - 'sigma;': '\u03c3', - 'sigmaf;': '\u03c2', - 'sigmav;': '\u03c2', - 'sim;': '\u223c', - 'simdot;': '\u2a6a', - 'sime;': '\u2243', - 'simeq;': '\u2243', - 'simg;': '\u2a9e', - 'simgE;': '\u2aa0', - 'siml;': '\u2a9d', - 'simlE;': '\u2a9f', - 'simne;': '\u2246', - 'simplus;': '\u2a24', - 'simrarr;': '\u2972', - 'slarr;': '\u2190', - 'SmallCircle;': '\u2218', - 'smallsetminus;': '\u2216', - 'smashp;': '\u2a33', - 'smeparsl;': '\u29e4', - 'smid;': '\u2223', - 'smile;': '\u2323', - 'smt;': '\u2aaa', - 'smte;': '\u2aac', - 'smtes;': '\u2aac\ufe00', - 'SOFTcy;': '\u042c', - 'softcy;': '\u044c', - 'sol;': '/', - 'solb;': '\u29c4', - 'solbar;': '\u233f', - 'Sopf;': '\U0001d54a', - 'sopf;': '\U0001d564', - 'spades;': '\u2660', - 'spadesuit;': '\u2660', - 'spar;': '\u2225', - 'sqcap;': '\u2293', - 'sqcaps;': '\u2293\ufe00', - 'sqcup;': '\u2294', - 'sqcups;': '\u2294\ufe00', - 'Sqrt;': '\u221a', - 'sqsub;': '\u228f', - 'sqsube;': '\u2291', - 'sqsubset;': '\u228f', - 'sqsubseteq;': '\u2291', - 'sqsup;': '\u2290', - 'sqsupe;': '\u2292', - 'sqsupset;': '\u2290', - 'sqsupseteq;': '\u2292', - 'squ;': '\u25a1', - 'Square;': '\u25a1', - 'square;': '\u25a1', - 'SquareIntersection;': '\u2293', - 'SquareSubset;': '\u228f', - 'SquareSubsetEqual;': '\u2291', - 'SquareSuperset;': '\u2290', - 'SquareSupersetEqual;': '\u2292', - 'SquareUnion;': '\u2294', - 'squarf;': '\u25aa', - 'squf;': '\u25aa', - 'srarr;': '\u2192', - 'Sscr;': '\U0001d4ae', - 'sscr;': '\U0001d4c8', - 'ssetmn;': '\u2216', - 'ssmile;': '\u2323', - 'sstarf;': '\u22c6', - 'Star;': '\u22c6', - 'star;': '\u2606', - 'starf;': '\u2605', - 'straightepsilon;': '\u03f5', - 'straightphi;': '\u03d5', - 'strns;': '\xaf', - 'Sub;': '\u22d0', - 'sub;': '\u2282', - 'subdot;': '\u2abd', - 'subE;': '\u2ac5', - 'sube;': '\u2286', - 'subedot;': '\u2ac3', - 'submult;': '\u2ac1', - 'subnE;': '\u2acb', - 'subne;': '\u228a', - 'subplus;': '\u2abf', - 'subrarr;': '\u2979', - 'Subset;': '\u22d0', - 'subset;': '\u2282', - 'subseteq;': '\u2286', - 'subseteqq;': '\u2ac5', - 'SubsetEqual;': '\u2286', - 'subsetneq;': '\u228a', - 'subsetneqq;': '\u2acb', - 'subsim;': '\u2ac7', - 'subsub;': '\u2ad5', - 'subsup;': 
'\u2ad3', - 'succ;': '\u227b', - 'succapprox;': '\u2ab8', - 'succcurlyeq;': '\u227d', - 'Succeeds;': '\u227b', - 'SucceedsEqual;': '\u2ab0', - 'SucceedsSlantEqual;': '\u227d', - 'SucceedsTilde;': '\u227f', - 'succeq;': '\u2ab0', - 'succnapprox;': '\u2aba', - 'succneqq;': '\u2ab6', - 'succnsim;': '\u22e9', - 'succsim;': '\u227f', - 'SuchThat;': '\u220b', - 'Sum;': '\u2211', - 'sum;': '\u2211', - 'sung;': '\u266a', - 'sup1': '\xb9', - 'sup1;': '\xb9', - 'sup2': '\xb2', - 'sup2;': '\xb2', - 'sup3': '\xb3', - 'sup3;': '\xb3', - 'Sup;': '\u22d1', - 'sup;': '\u2283', - 'supdot;': '\u2abe', - 'supdsub;': '\u2ad8', - 'supE;': '\u2ac6', - 'supe;': '\u2287', - 'supedot;': '\u2ac4', - 'Superset;': '\u2283', - 'SupersetEqual;': '\u2287', - 'suphsol;': '\u27c9', - 'suphsub;': '\u2ad7', - 'suplarr;': '\u297b', - 'supmult;': '\u2ac2', - 'supnE;': '\u2acc', - 'supne;': '\u228b', - 'supplus;': '\u2ac0', - 'Supset;': '\u22d1', - 'supset;': '\u2283', - 'supseteq;': '\u2287', - 'supseteqq;': '\u2ac6', - 'supsetneq;': '\u228b', - 'supsetneqq;': '\u2acc', - 'supsim;': '\u2ac8', - 'supsub;': '\u2ad4', - 'supsup;': '\u2ad6', - 'swarhk;': '\u2926', - 'swArr;': '\u21d9', - 'swarr;': '\u2199', - 'swarrow;': '\u2199', - 'swnwar;': '\u292a', - 'szlig': '\xdf', - 'szlig;': '\xdf', - 'Tab;': '\t', - 'target;': '\u2316', - 'Tau;': '\u03a4', - 'tau;': '\u03c4', - 'tbrk;': '\u23b4', - 'Tcaron;': '\u0164', - 'tcaron;': '\u0165', - 'Tcedil;': '\u0162', - 'tcedil;': '\u0163', - 'Tcy;': '\u0422', - 'tcy;': '\u0442', - 'tdot;': '\u20db', - 'telrec;': '\u2315', - 'Tfr;': '\U0001d517', - 'tfr;': '\U0001d531', - 'there4;': '\u2234', - 'Therefore;': '\u2234', - 'therefore;': '\u2234', - 'Theta;': '\u0398', - 'theta;': '\u03b8', - 'thetasym;': '\u03d1', - 'thetav;': '\u03d1', - 'thickapprox;': '\u2248', - 'thicksim;': '\u223c', - 'ThickSpace;': '\u205f\u200a', - 'thinsp;': '\u2009', - 'ThinSpace;': '\u2009', - 'thkap;': '\u2248', - 'thksim;': '\u223c', - 'THORN': '\xde', - 'thorn': '\xfe', - 'THORN;': '\xde', - 'thorn;': '\xfe', - 'Tilde;': '\u223c', - 'tilde;': '\u02dc', - 'TildeEqual;': '\u2243', - 'TildeFullEqual;': '\u2245', - 'TildeTilde;': '\u2248', - 'times': '\xd7', - 'times;': '\xd7', - 'timesb;': '\u22a0', - 'timesbar;': '\u2a31', - 'timesd;': '\u2a30', - 'tint;': '\u222d', - 'toea;': '\u2928', - 'top;': '\u22a4', - 'topbot;': '\u2336', - 'topcir;': '\u2af1', - 'Topf;': '\U0001d54b', - 'topf;': '\U0001d565', - 'topfork;': '\u2ada', - 'tosa;': '\u2929', - 'tprime;': '\u2034', - 'TRADE;': '\u2122', - 'trade;': '\u2122', - 'triangle;': '\u25b5', - 'triangledown;': '\u25bf', - 'triangleleft;': '\u25c3', - 'trianglelefteq;': '\u22b4', - 'triangleq;': '\u225c', - 'triangleright;': '\u25b9', - 'trianglerighteq;': '\u22b5', - 'tridot;': '\u25ec', - 'trie;': '\u225c', - 'triminus;': '\u2a3a', - 'TripleDot;': '\u20db', - 'triplus;': '\u2a39', - 'trisb;': '\u29cd', - 'tritime;': '\u2a3b', - 'trpezium;': '\u23e2', - 'Tscr;': '\U0001d4af', - 'tscr;': '\U0001d4c9', - 'TScy;': '\u0426', - 'tscy;': '\u0446', - 'TSHcy;': '\u040b', - 'tshcy;': '\u045b', - 'Tstrok;': '\u0166', - 'tstrok;': '\u0167', - 'twixt;': '\u226c', - 'twoheadleftarrow;': '\u219e', - 'twoheadrightarrow;': '\u21a0', - 'Uacute': '\xda', - 'uacute': '\xfa', - 'Uacute;': '\xda', - 'uacute;': '\xfa', - 'Uarr;': '\u219f', - 'uArr;': '\u21d1', - 'uarr;': '\u2191', - 'Uarrocir;': '\u2949', - 'Ubrcy;': '\u040e', - 'ubrcy;': '\u045e', - 'Ubreve;': '\u016c', - 'ubreve;': '\u016d', - 'Ucirc': '\xdb', - 'ucirc': '\xfb', - 'Ucirc;': '\xdb', - 'ucirc;': '\xfb', - 'Ucy;': '\u0423', - 
'ucy;': '\u0443', - 'udarr;': '\u21c5', - 'Udblac;': '\u0170', - 'udblac;': '\u0171', - 'udhar;': '\u296e', - 'ufisht;': '\u297e', - 'Ufr;': '\U0001d518', - 'ufr;': '\U0001d532', - 'Ugrave': '\xd9', - 'ugrave': '\xf9', - 'Ugrave;': '\xd9', - 'ugrave;': '\xf9', - 'uHar;': '\u2963', - 'uharl;': '\u21bf', - 'uharr;': '\u21be', - 'uhblk;': '\u2580', - 'ulcorn;': '\u231c', - 'ulcorner;': '\u231c', - 'ulcrop;': '\u230f', - 'ultri;': '\u25f8', - 'Umacr;': '\u016a', - 'umacr;': '\u016b', - 'uml': '\xa8', - 'uml;': '\xa8', - 'UnderBar;': '_', - 'UnderBrace;': '\u23df', - 'UnderBracket;': '\u23b5', - 'UnderParenthesis;': '\u23dd', - 'Union;': '\u22c3', - 'UnionPlus;': '\u228e', - 'Uogon;': '\u0172', - 'uogon;': '\u0173', - 'Uopf;': '\U0001d54c', - 'uopf;': '\U0001d566', - 'UpArrow;': '\u2191', - 'Uparrow;': '\u21d1', - 'uparrow;': '\u2191', - 'UpArrowBar;': '\u2912', - 'UpArrowDownArrow;': '\u21c5', - 'UpDownArrow;': '\u2195', - 'Updownarrow;': '\u21d5', - 'updownarrow;': '\u2195', - 'UpEquilibrium;': '\u296e', - 'upharpoonleft;': '\u21bf', - 'upharpoonright;': '\u21be', - 'uplus;': '\u228e', - 'UpperLeftArrow;': '\u2196', - 'UpperRightArrow;': '\u2197', - 'Upsi;': '\u03d2', - 'upsi;': '\u03c5', - 'upsih;': '\u03d2', - 'Upsilon;': '\u03a5', - 'upsilon;': '\u03c5', - 'UpTee;': '\u22a5', - 'UpTeeArrow;': '\u21a5', - 'upuparrows;': '\u21c8', - 'urcorn;': '\u231d', - 'urcorner;': '\u231d', - 'urcrop;': '\u230e', - 'Uring;': '\u016e', - 'uring;': '\u016f', - 'urtri;': '\u25f9', - 'Uscr;': '\U0001d4b0', - 'uscr;': '\U0001d4ca', - 'utdot;': '\u22f0', - 'Utilde;': '\u0168', - 'utilde;': '\u0169', - 'utri;': '\u25b5', - 'utrif;': '\u25b4', - 'uuarr;': '\u21c8', - 'Uuml': '\xdc', - 'uuml': '\xfc', - 'Uuml;': '\xdc', - 'uuml;': '\xfc', - 'uwangle;': '\u29a7', - 'vangrt;': '\u299c', - 'varepsilon;': '\u03f5', - 'varkappa;': '\u03f0', - 'varnothing;': '\u2205', - 'varphi;': '\u03d5', - 'varpi;': '\u03d6', - 'varpropto;': '\u221d', - 'vArr;': '\u21d5', - 'varr;': '\u2195', - 'varrho;': '\u03f1', - 'varsigma;': '\u03c2', - 'varsubsetneq;': '\u228a\ufe00', - 'varsubsetneqq;': '\u2acb\ufe00', - 'varsupsetneq;': '\u228b\ufe00', - 'varsupsetneqq;': '\u2acc\ufe00', - 'vartheta;': '\u03d1', - 'vartriangleleft;': '\u22b2', - 'vartriangleright;': '\u22b3', - 'Vbar;': '\u2aeb', - 'vBar;': '\u2ae8', - 'vBarv;': '\u2ae9', - 'Vcy;': '\u0412', - 'vcy;': '\u0432', - 'VDash;': '\u22ab', - 'Vdash;': '\u22a9', - 'vDash;': '\u22a8', - 'vdash;': '\u22a2', - 'Vdashl;': '\u2ae6', - 'Vee;': '\u22c1', - 'vee;': '\u2228', - 'veebar;': '\u22bb', - 'veeeq;': '\u225a', - 'vellip;': '\u22ee', - 'Verbar;': '\u2016', - 'verbar;': '|', - 'Vert;': '\u2016', - 'vert;': '|', - 'VerticalBar;': '\u2223', - 'VerticalLine;': '|', - 'VerticalSeparator;': '\u2758', - 'VerticalTilde;': '\u2240', - 'VeryThinSpace;': '\u200a', - 'Vfr;': '\U0001d519', - 'vfr;': '\U0001d533', - 'vltri;': '\u22b2', - 'vnsub;': '\u2282\u20d2', - 'vnsup;': '\u2283\u20d2', - 'Vopf;': '\U0001d54d', - 'vopf;': '\U0001d567', - 'vprop;': '\u221d', - 'vrtri;': '\u22b3', - 'Vscr;': '\U0001d4b1', - 'vscr;': '\U0001d4cb', - 'vsubnE;': '\u2acb\ufe00', - 'vsubne;': '\u228a\ufe00', - 'vsupnE;': '\u2acc\ufe00', - 'vsupne;': '\u228b\ufe00', - 'Vvdash;': '\u22aa', - 'vzigzag;': '\u299a', - 'Wcirc;': '\u0174', - 'wcirc;': '\u0175', - 'wedbar;': '\u2a5f', - 'Wedge;': '\u22c0', - 'wedge;': '\u2227', - 'wedgeq;': '\u2259', - 'weierp;': '\u2118', - 'Wfr;': '\U0001d51a', - 'wfr;': '\U0001d534', - 'Wopf;': '\U0001d54e', - 'wopf;': '\U0001d568', - 'wp;': '\u2118', - 'wr;': '\u2240', - 'wreath;': 
'\u2240', - 'Wscr;': '\U0001d4b2', - 'wscr;': '\U0001d4cc', - 'xcap;': '\u22c2', - 'xcirc;': '\u25ef', - 'xcup;': '\u22c3', - 'xdtri;': '\u25bd', - 'Xfr;': '\U0001d51b', - 'xfr;': '\U0001d535', - 'xhArr;': '\u27fa', - 'xharr;': '\u27f7', - 'Xi;': '\u039e', - 'xi;': '\u03be', - 'xlArr;': '\u27f8', - 'xlarr;': '\u27f5', - 'xmap;': '\u27fc', - 'xnis;': '\u22fb', - 'xodot;': '\u2a00', - 'Xopf;': '\U0001d54f', - 'xopf;': '\U0001d569', - 'xoplus;': '\u2a01', - 'xotime;': '\u2a02', - 'xrArr;': '\u27f9', - 'xrarr;': '\u27f6', - 'Xscr;': '\U0001d4b3', - 'xscr;': '\U0001d4cd', - 'xsqcup;': '\u2a06', - 'xuplus;': '\u2a04', - 'xutri;': '\u25b3', - 'xvee;': '\u22c1', - 'xwedge;': '\u22c0', - 'Yacute': '\xdd', - 'yacute': '\xfd', - 'Yacute;': '\xdd', - 'yacute;': '\xfd', - 'YAcy;': '\u042f', - 'yacy;': '\u044f', - 'Ycirc;': '\u0176', - 'ycirc;': '\u0177', - 'Ycy;': '\u042b', - 'ycy;': '\u044b', - 'yen': '\xa5', - 'yen;': '\xa5', - 'Yfr;': '\U0001d51c', - 'yfr;': '\U0001d536', - 'YIcy;': '\u0407', - 'yicy;': '\u0457', - 'Yopf;': '\U0001d550', - 'yopf;': '\U0001d56a', - 'Yscr;': '\U0001d4b4', - 'yscr;': '\U0001d4ce', - 'YUcy;': '\u042e', - 'yucy;': '\u044e', - 'yuml': '\xff', - 'Yuml;': '\u0178', - 'yuml;': '\xff', - 'Zacute;': '\u0179', - 'zacute;': '\u017a', - 'Zcaron;': '\u017d', - 'zcaron;': '\u017e', - 'Zcy;': '\u0417', - 'zcy;': '\u0437', - 'Zdot;': '\u017b', - 'zdot;': '\u017c', - 'zeetrf;': '\u2128', - 'ZeroWidthSpace;': '\u200b', - 'Zeta;': '\u0396', - 'zeta;': '\u03b6', - 'Zfr;': '\u2128', - 'zfr;': '\U0001d537', - 'ZHcy;': '\u0416', - 'zhcy;': '\u0436', - 'zigrarr;': '\u21dd', - 'Zopf;': '\u2124', - 'zopf;': '\U0001d56b', - 'Zscr;': '\U0001d4b5', - 'zscr;': '\U0001d4cf', - 'zwj;': '\u200d', - 'zwnj;': '\u200c', -} - -# maps the Unicode codepoint to the HTML entity name -codepoint2name = {} - -# maps the HTML entity name to the character -# (or a character reference if the character is outside the Latin-1 range) -entitydefs = {} - -for (name, codepoint) in name2codepoint.items(): - codepoint2name[codepoint] = name - entitydefs[name] = chr(codepoint) - -del name, codepoint diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/parser.py deleted file mode 100755 index fb65263..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/html/parser.py +++ /dev/null @@ -1,536 +0,0 @@ -"""A parser for HTML and XHTML. - -Backported for python-future from Python 3.3. -""" - -# This file is based on sgmllib.py, but the API is slightly different. - -# XXX There should be a way to distinguish between PCDATA (parsed -# character data -- the normal case), RCDATA (replaceable character -# data -- only char and entity references and end tags are special) -# and CDATA (character data -- only end tags are special). 
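For reviewers of this hunk: the deleted entities module defines four related tables. html5 maps entity references (with, and for the legacy Latin-1 subset, also without, the trailing semicolon) to replacement text; name2codepoint maps the classic HTML 4 entity names to code points; and the small loop above derives codepoint2name and entitydefs from it. The stdlib html.entities module ships the same data, so it can serve as a sanity check. A minimal sketch, assuming only the standard library:

    from html.entities import html5, name2codepoint, codepoint2name, entitydefs

    # html5 keys keep the trailing ';'; the legacy Latin-1 names also
    # appear without it (both 'ouml;' and 'ouml' are present).
    assert html5['ouml;'] == '\xf6' and html5['ouml'] == '\xf6'

    # name2codepoint and codepoint2name are mutual inverses over the named entities.
    assert name2codepoint['ouml'] == 0xF6 and codepoint2name[0xF6] == 'ouml'

    # entitydefs is derived exactly as in the deleted loop above.
    assert entitydefs['ouml'] == chr(name2codepoint['ouml'])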
-
-from __future__ import (absolute_import, division,
-                        print_function, unicode_literals)
-from future.builtins import *
-from future.backports import _markupbase
-import re
-import warnings
-
-# Regular expressions used for parsing
-
-interesting_normal = re.compile('[&<]')
-incomplete = re.compile('&[a-zA-Z#]')
-
-entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
-charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
-
-starttagopen = re.compile('<[a-zA-Z]')
-piclose = re.compile('>')
-commentclose = re.compile(r'--\s*>')
-tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
-# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
-# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
-tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
-# Note:
-# 1) the strict attrfind isn't really strict, but we can't make it
-#    correctly strict without breaking backward compatibility;
-# 2) if you change attrfind remember to update locatestarttagend too;
-# 3) if you change attrfind and/or locatestarttagend the parser will
-#    explode, so don't do it.
-attrfind = re.compile(
-    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
-    r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
-attrfind_tolerant = re.compile(
-    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
-    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
-locatestarttagend = re.compile(r"""
-  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
-  (?:\s+                             # whitespace before attribute name
-    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
-      (?:\s*=\s*                     # value indicator
-        (?:'[^']*'                   # LITA-enclosed value
-          |\"[^\"]*\"                # LIT-enclosed value
-          |[^'\">\s]+                # bare value
-         )
-       )?
-     )
-   )*
-  \s*                                # trailing whitespace
-""", re.VERBOSE)
-locatestarttagend_tolerant = re.compile(r"""
-  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
-  (?:[\s/]*                          # optional whitespace before attribute name
-    (?:(?<=['"\s/])[^\s/>][^\s/=>]*  # attribute name
-      (?:\s*=+\s*                    # value indicator
-        (?:'[^']*'                   # LITA-enclosed value
-          |"[^"]*"                   # LIT-enclosed value
-          |(?!['"])[^>\s]*           # bare value
-         )
-         (?:\s*,)*                   # possibly followed by a comma
-       )?(?:\s|/(?!>))*
-     )*
-   )?
-  \s*                                # trailing whitespace
-""", re.VERBOSE)
-endendtag = re.compile('>')
-# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
-# </ and the tag name, so maybe this should be fixed
-endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
-
-
-class HTMLParseError(Exception):
-    """Exception raised for all parse errors."""
-
-    def __init__(self, msg, position=(None, None)):
-        assert msg
-        self.msg = msg
-        self.lineno = position[0]
-        self.offset = position[1]
-
-    def __str__(self):
-        result = self.msg
-        if self.lineno is not None:
-            result = result + ", at line %d" % self.lineno
-        if self.offset is not None:
-            result = result + ", column %d" % (self.offset + 1)
-        return result
-
-
-class HTMLParser(_markupbase.ParserBase):
-    """Find tags and other markup and call handler functions.
-
-    Usage:
-        p = HTMLParser()
-        p.feed(data)
-        ...
-        p.close()
-
-    Start tags are handled by calling self.handle_starttag() or
-    self.handle_startendtag(); end tags by self.handle_endtag().  The
-    data between tags is passed from the parser to the derived class
-    by calling self.handle_data() with the data as argument (the data
-    may be split up in arbitrary chunks).  Entity references are
-    passed by calling self.handle_entityref() with the entity
-    reference as the argument.  Numeric character references are
-    passed to self.handle_charref() with the string containing the
-    reference as the argument.
-    """
-
-    CDATA_CONTENT_ELEMENTS = ("script", "style")
-
-    def __init__(self, strict=False):
-        """Initialize and reset this instance.
-
-        If strict is set to False (the default) the parser will parse invalid
-        markup, otherwise it will raise an error.  Note that the strict mode
-        is deprecated.
-        """
-        if strict:
-            warnings.warn("The strict mode is deprecated.",
-                          DeprecationWarning, stacklevel=2)
-        self.strict = strict
-        self.reset()
-
-    def reset(self):
-        """Reset this instance.  Loses all unprocessed data."""
-        self.rawdata = ''
-        self.lasttag = '???'
-        self.interesting = interesting_normal
-        self.cdata_elem = None
-        _markupbase.ParserBase.reset(self)
-
-    def feed(self, data):
-        r"""Feed data to the parser.
-
-        Call this as often as you want, with as little or as much text
-        as you want (may include '\n').
-        """
-        self.rawdata = self.rawdata + data
-        self.goahead(0)
-
-    def close(self):
-        """Handle any buffered data."""
-        self.goahead(1)
-
-    def error(self, message):
-        raise HTMLParseError(message, self.getpos())
-
-    __starttag_text = None
-
-    def get_starttag_text(self):
-        """Return full source of start tag: '<...>'."""
-        return self.__starttag_text
-
-    def set_cdata_mode(self, elem):
-        self.cdata_elem = elem.lower()
-        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
-
-    def clear_cdata_mode(self):
-        self.interesting = interesting_normal
-        self.cdata_elem = None
-
-    # Internal -- handle data as far as reasonable.  May leave state
-    # and data to be processed by a subsequent call.  If 'end' is
-    # true, force handling all data as if followed by EOF marker.
-    def goahead(self, end):
-        rawdata = self.rawdata
-        i = 0
-        n = len(rawdata)
-        while i < n:
-            match = self.interesting.search(rawdata, i) # < or &
-            if match:
-                j = match.start()
-            else:
-                if self.cdata_elem:
-                    break
-                j = n
-            if i < j: self.handle_data(rawdata[i:j])
-            i = self.updatepos(i, j)
-            if i == n: break
-            startswith = rawdata.startswith
-            if startswith('<', i):
-                if starttagopen.match(rawdata, i): # < + letter
-                    k = self.parse_starttag(i)
-                elif startswith("</", i):
-                    k = self.parse_endtag(i)
-                elif startswith("<!--", i):
-                    k = self.parse_comment(i)
-                elif startswith("<?", i):
-                    k = self.parse_pi(i)
-                elif startswith("<!", i):
-                    if self.strict:
-                        k = self.parse_declaration(i)
-                    else:
-                        k = self.parse_html_declaration(i)
-                elif (i + 1) < n:
-                    self.handle_data("<")
-                    k = i + 1
-                else:
-                    break
-                if k < 0:
-                    if not end:
-                        break
-                    if self.strict:
-                        self.error("EOF in middle of construct")
-                    k = rawdata.find('>', i + 1)
-                    if k < 0:
-                        k = rawdata.find('<', i + 1)
-                        if k < 0:
-                            k = i + 1
-                    else:
-                        k += 1
-                    self.handle_data(rawdata[i:k])
-                i = self.updatepos(i, k)
-            elif startswith("&#", i):
-                match = charref.match(rawdata, i)
-                if match:
-                    name = match.group()[2:-1]
-                    self.handle_charref(name)
-                    k = match.end()
-                    if not startswith(';', k-1):
-                        k = k - 1
-                    i = self.updatepos(i, k)
-                    continue
-                else:
-                    if ";" in rawdata[i:]:  #bail by consuming &#
-                        self.handle_data(rawdata[0:2])
-                        i = self.updatepos(i, 2)
-                    break
-            elif startswith('&', i):
-                match = entityref.match(rawdata, i)
-                if match:
-                    name = match.group(1)
-                    self.handle_entityref(name)
-                    k = match.end()
-                    if not startswith(';', k-1):
-                        k = k - 1
-                    i = self.updatepos(i, k)
-                    continue
-                match = incomplete.match(rawdata, i)
-                if match:
-                    # match.group() will contain at least 2 chars
-                    if end and match.group() == rawdata[i:]:
-                        if self.strict:
-                            self.error("EOF in middle of entity or char ref")
-                        else:
-                            if k <= i:
-                                k = n
-                            i = self.updatepos(i, i + 1)
-                    # incomplete
-                    break
-                elif (i + 1) < n:
-                    # not the end of the buffer, and can't be confused
-                    # with some other construct
-                    self.handle_data("&")
-                    i = self.updatepos(i, i + 1)
-                else:
-                    break
-            else:
-                assert 0, "interesting.search() lied"
-        # end while
-        if end and i < n and not self.cdata_elem:
-            self.handle_data(rawdata[i:n])
-            i = self.updatepos(i, n)
-        self.rawdata = rawdata[i:]
-
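For reviewers: goahead() above is the tokenizer loop; everything it recognizes is delivered through the overridable handle_* callbacks defined further down. A minimal sketch of that callback API, written against the stdlib html.parser, which exposes the same interface this backport mirrors; TitleGrabber is an illustrative name, not part of the patch:

    from html.parser import HTMLParser

    class TitleGrabber(HTMLParser):
        """Collect the text between <title> and </title>."""
        def __init__(self):
            super().__init__()  # modern stdlib converts &amp; etc. to text by default
            self.in_title = False
            self.parts = []

        def handle_starttag(self, tag, attrs):
            if tag == 'title':
                self.in_title = True

        def handle_endtag(self, tag):
            if tag == 'title':
                self.in_title = False

        def handle_data(self, data):
            if self.in_title:
                self.parts.append(data)

    p = TitleGrabber()
    p.feed('<html><head><title>Hello &amp; welcome</title></head>')
    p.close()
    print(''.join(p.parts))  # Hello & welcome

One difference worth noting: in the 3.3-era code being deleted here there is no convert_charrefs, so '&amp;' would arrive via handle_entityref('amp') rather than in handle_data.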
-    # Internal -- parse html declarations, return length or -1 if not terminated
-    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
-    # See also parse_declaration in _markupbase
-    def parse_html_declaration(self, i):
-        rawdata = self.rawdata
-        assert rawdata[i:i+2] == '<!', ('unexpected call to '
-                                        'parse_html_declaration()')
-        if rawdata[i:i+4] == '<!--':
-            # this case is actually already handled in goahead()
-            return self.parse_comment(i)
-        elif rawdata[i:i+3] == '<![':
-            return self.parse_marked_section(i)
-        elif rawdata[i:i+9].lower() == '<!doctype':
-            # find the closing >
-            gtpos = rawdata.find('>', i+9)
-            if gtpos == -1:
-                return -1
-            self.handle_decl(rawdata[i+2:gtpos])
-            return gtpos+1
-        else:
-            return self.parse_bogus_comment(i)
-
-    # Internal -- parse bogus comment, return length or -1 if not terminated
-    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
-    def parse_bogus_comment(self, i, report=1):
-        rawdata = self.rawdata
-        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
-                                                'parse_comment()')
-        pos = rawdata.find('>', i+2)
-        if pos == -1:
-            return -1
-        if report:
-            self.handle_comment(rawdata[i+2:pos])
-        return pos + 1
-
-    # Internal -- parse processing instr, return end or -1 if not terminated
-    def parse_pi(self, i):
-        rawdata = self.rawdata
-        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
-        match = piclose.search(rawdata, i+2) # >
-        if not match:
-            return -1
-        j = match.start()
-        self.handle_pi(rawdata[i+2: j])
-        j = match.end()
-        return j
-
-    # Internal -- handle starttag, return end or -1 if not terminated
-    def parse_starttag(self, i):
-        self.__starttag_text = None
-        endpos = self.check_for_whole_start_tag(i)
-        if endpos < 0:
-            return endpos
-        rawdata = self.rawdata
-        self.__starttag_text = rawdata[i:endpos]
-
-        # Now parse the data between i+1 and j into a tag and attrs
-        attrs = []
-        match = tagfind.match(rawdata, i+1)
-        assert match, 'unexpected call to parse_starttag()'
-        k = match.end()
-        self.lasttag = tag = match.group(1).lower()
-        while k < endpos:
-            if self.strict:
-                m = attrfind.match(rawdata, k)
-            else:
-                m = attrfind_tolerant.match(rawdata, k)
-            if not m:
-                break
-            attrname, rest, attrvalue = m.group(1, 2, 3)
-            if not rest:
-                attrvalue = None
-            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
-                 attrvalue[:1] == '"' == attrvalue[-1:]:
-                attrvalue = attrvalue[1:-1]
-            if attrvalue:
-                attrvalue = self.unescape(attrvalue)
-            attrs.append((attrname.lower(), attrvalue))
-            k = m.end()
-
-        end = rawdata[k:endpos].strip()
-        if end not in (">", "/>"):
-            lineno, offset = self.getpos()
-            if "\n" in self.__starttag_text:
-                lineno = lineno + self.__starttag_text.count("\n")
-                offset = len(self.__starttag_text) \
-                         - self.__starttag_text.rfind("\n")
-            else:
-                offset = offset + len(self.__starttag_text)
-            if self.strict:
-                self.error("junk characters in start tag: %r"
-                           % (rawdata[k:endpos][:20],))
-            self.handle_data(rawdata[i:endpos])
-            return endpos
-        if end.endswith('/>'):
-            # XHTML-style empty tag: <span attr="value" />
-            self.handle_startendtag(tag, attrs)
-        else:
-            self.handle_starttag(tag, attrs)
-            if tag in self.CDATA_CONTENT_ELEMENTS:
-                self.set_cdata_mode(tag)
-        return endpos
-
-    # Internal -- check to see if we have a complete starttag; return end
-    # or -1 if incomplete.
-    def check_for_whole_start_tag(self, i):
-        rawdata = self.rawdata
-        if self.strict:
-            m = locatestarttagend.match(rawdata, i)
-        else:
-            m = locatestarttagend_tolerant.match(rawdata, i)
-        if m:
-            j = m.end()
-            next = rawdata[j:j+1]
-            if next == ">":
-                return j + 1
-            if next == "/":
-                if rawdata.startswith("/>", j):
-                    return j + 2
-                if rawdata.startswith("/", j):
-                    # buffer boundary
-                    return -1
-                # else bogus input
-                if self.strict:
-                    self.updatepos(i, j + 1)
-                    self.error("malformed empty start tag")
-                if j > i:
-                    return j
-                else:
-                    return i + 1
-            if next == "":
-                # end of input
-                return -1
-            if next in ("abcdefghijklmnopqrstuvwxyz=/"
-                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
-                # end of input in or before attribute value, or we have the
-                # '/' from a '/>' ending
-                return -1
-            if self.strict:
-                self.updatepos(i, j)
-                self.error("malformed start tag")
-            if j > i:
-                return j
-            else:
-                return i + 1
-        raise AssertionError("we should not get here!")
-
-    # Internal -- parse endtag, return end or -1 if incomplete
-    def parse_endtag(self, i):
-        rawdata = self.rawdata
-        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
-        match = endendtag.search(rawdata, i+1) # >
-        if not match:
-            return -1
-        gtpos = match.end()
-        match = endtagfind.match(rawdata, i) # </ + tag + >
-        if not match:
-            if self.cdata_elem is not None:
-                self.handle_data(rawdata[i:gtpos])
-                return gtpos
-            if self.strict:
-                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
-            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
-            namematch = tagfind_tolerant.match(rawdata, i+2)
-            if not namematch:
-                # w3.org/TR/html5/tokenization.html#end-tag-open-state
-                if rawdata[i:i+3] == '</>':
-                    return i+3
-                else:
-                    return self.parse_bogus_comment(i)
-            tagname = namematch.group().lower()
-            # consume and ignore other stuff between the name and the >
-            # Note: this is not 100% correct, since we might have things like
-            # <div attr=">">, but looking for > after the name should cover
-            # most of the cases and is much simpler
-            gtpos = rawdata.find('>', namematch.end())
-            self.handle_endtag(tagname)
-            return gtpos+1
-
-        elem = match.group(1).lower() # script or style
-        if self.cdata_elem is not None:
-            if elem != self.cdata_elem:
-                self.handle_data(rawdata[i:gtpos])
-                return gtpos
-
-        self.handle_endtag(elem.lower())
-        self.clear_cdata_mode()
-        return gtpos
-
-    # Overridable -- finish processing of start+end tag: <tag.../>
-    def handle_startendtag(self, tag, attrs):
-        self.handle_starttag(tag, attrs)
-        self.handle_endtag(tag)
-
-    # Overridable -- handle start tag
-    def handle_starttag(self, tag, attrs):
-        pass
-
-    # Overridable -- handle end tag
-    def handle_endtag(self, tag):
-        pass
-
-    # Overridable -- handle character reference
-    def handle_charref(self, name):
-        pass
-
-    # Overridable -- handle entity reference
-    def handle_entityref(self, name):
-        pass
-
-    # Overridable -- handle data
-    def handle_data(self, data):
-        pass
-
-    # Overridable -- handle comment
-    def handle_comment(self, data):
-        pass
-
-    # Overridable -- handle declaration
-    def handle_decl(self, decl):
-        pass
-
-    # Overridable -- handle processing instruction
-    def handle_pi(self, data):
-        pass
-
-    def unknown_decl(self, data):
-        if self.strict:
-            self.error("unknown declaration: %r" % (data,))
-
-    # Internal -- helper to remove special character quoting
-    def unescape(self, s):
-        if '&' not in s:
-            return s
-        def replaceEntities(s):
-            s = s.groups()[0]
-            try:
-                if s[0] == "#":
-                    s = s[1:]
-                    if s[0] in ['x','X']:
-                        c = int(s[1:].rstrip(';'), 16)
-                    else:
-                        c = int(s.rstrip(';'))
-                    return chr(c)
-            except ValueError:
-                return '&#' + s
-            else:
-                from future.backports.html.entities import 
html5
-                if s in html5:
-                    return html5[s]
-                elif s.endswith(';'):
-                    return '&' + s
-                for x in range(2, len(s)):
-                    if s[:x] in html5:
-                        return html5[s[:x]] + s[x:]
-                else:
-                    return '&' + s
-
-        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
-                      replaceEntities, s)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/client.py
deleted file mode 100755
index e663d12..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/client.py
+++ /dev/null
@@ -1,1346 +0,0 @@
-"""HTTP/1.1 client library
-
-A backport of the Python 3.3 http/client.py module for python-future.
-
-<intro stuff goes here>
-<other stuff, too>
-
-HTTPConnection goes through a number of "states", which define when a client
-may legally make another request or fetch the response for a particular
-request. This diagram details these state transitions:
-
-    (null)
-      |
-      | HTTPConnection()
-      v
-    Idle
-      |
-      | putrequest()
-      v
-    Request-started
-      |
-      | ( putheader() )*  endheaders()
-      v
-    Request-sent
-      |
-      | response = getresponse()
-      v
-    Unread-response   [Response-headers-read]
-      |\____________________
-      |                     |
-      | response.read()     | putrequest()
-      v                     v
-    Idle                  Req-started-unread-response
-                     ______/|
-                   /        |
-   response.read() |        | ( putheader() )*  endheaders()
-                   v        v
-       Request-started    Req-sent-unread-response
-                            |
-                            | response.read()
-                            v
-                          Request-sent
-
-This diagram presents the following rules:
-  -- a second request may not be started until {response-headers-read}
-  -- a response [object] cannot be retrieved until {request-sent}
-  -- there is no differentiation between an unread response body and a
-     partially read response body
-
-Note: this enforcement is applied by the HTTPConnection class. The
-      HTTPResponse class does not enforce this state machine, which
-      implies sophisticated clients may accelerate the request/response
-      pipeline. Caution should be taken, though: accelerating the states
-      beyond the above pattern may imply knowledge of the server's
-      connection-close behavior for certain requests. For example, it
-      is impossible to tell whether the server will close the connection
-      UNTIL the response headers have been read; this means that further
-      requests cannot be placed into the pipeline until it is known that
-      the server will NOT be closing the connection.
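For reviewers: the state diagram above corresponds directly to the public call sequence of the connection class. A minimal sketch against the stdlib http.client, which keeps the same API as this backport; example.com is a placeholder host, and each comment names the transition taken:

    import http.client

    conn = http.client.HTTPConnection('example.com')  # (null) -> Idle
    conn.putrequest('GET', '/')                       # Idle -> Request-started
    conn.putheader('Accept', 'text/html')             # ( putheader() )*
    conn.endheaders()                                 # Request-started -> Request-sent
    resp = conn.getresponse()                         # Request-sent -> Unread-response
    body = resp.read()                                # Unread-response -> Idle
    print(resp.status, len(body))
    conn.close()

The higher-level conn.request('GET', '/') wraps the putrequest/putheader/endheaders sequence in a single call.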
- -Logical State __state __response -------------- ------- ---------- -Idle _CS_IDLE None -Request-started _CS_REQ_STARTED None -Request-sent _CS_REQ_SENT None -Unread-response _CS_IDLE -Req-started-unread-response _CS_REQ_STARTED -Req-sent-unread-response _CS_REQ_SENT -""" - -from __future__ import (absolute_import, division, - print_function, unicode_literals) -from future.builtins import bytes, int, str, super -from future.utils import PY2 - -from future.backports.email import parser as email_parser -from future.backports.email import message as email_message -from future.backports.misc import create_connection as socket_create_connection -import io -import os -import socket -from future.backports.urllib.parse import urlsplit -import warnings -from array import array - -if PY2: - from collections import Iterable -else: - from collections.abc import Iterable - -__all__ = ["HTTPResponse", "HTTPConnection", - "HTTPException", "NotConnected", "UnknownProtocol", - "UnknownTransferEncoding", "UnimplementedFileMode", - "IncompleteRead", "InvalidURL", "ImproperConnectionState", - "CannotSendRequest", "CannotSendHeader", "ResponseNotReady", - "BadStatusLine", "error", "responses"] - -HTTP_PORT = 80 -HTTPS_PORT = 443 - -_UNKNOWN = 'UNKNOWN' - -# connection states -_CS_IDLE = 'Idle' -_CS_REQ_STARTED = 'Request-started' -_CS_REQ_SENT = 'Request-sent' - -# status codes -# informational -CONTINUE = 100 -SWITCHING_PROTOCOLS = 101 -PROCESSING = 102 - -# successful -OK = 200 -CREATED = 201 -ACCEPTED = 202 -NON_AUTHORITATIVE_INFORMATION = 203 -NO_CONTENT = 204 -RESET_CONTENT = 205 -PARTIAL_CONTENT = 206 -MULTI_STATUS = 207 -IM_USED = 226 - -# redirection -MULTIPLE_CHOICES = 300 -MOVED_PERMANENTLY = 301 -FOUND = 302 -SEE_OTHER = 303 -NOT_MODIFIED = 304 -USE_PROXY = 305 -TEMPORARY_REDIRECT = 307 - -# client error -BAD_REQUEST = 400 -UNAUTHORIZED = 401 -PAYMENT_REQUIRED = 402 -FORBIDDEN = 403 -NOT_FOUND = 404 -METHOD_NOT_ALLOWED = 405 -NOT_ACCEPTABLE = 406 -PROXY_AUTHENTICATION_REQUIRED = 407 -REQUEST_TIMEOUT = 408 -CONFLICT = 409 -GONE = 410 -LENGTH_REQUIRED = 411 -PRECONDITION_FAILED = 412 -REQUEST_ENTITY_TOO_LARGE = 413 -REQUEST_URI_TOO_LONG = 414 -UNSUPPORTED_MEDIA_TYPE = 415 -REQUESTED_RANGE_NOT_SATISFIABLE = 416 -EXPECTATION_FAILED = 417 -UNPROCESSABLE_ENTITY = 422 -LOCKED = 423 -FAILED_DEPENDENCY = 424 -UPGRADE_REQUIRED = 426 -PRECONDITION_REQUIRED = 428 -TOO_MANY_REQUESTS = 429 -REQUEST_HEADER_FIELDS_TOO_LARGE = 431 - -# server error -INTERNAL_SERVER_ERROR = 500 -NOT_IMPLEMENTED = 501 -BAD_GATEWAY = 502 -SERVICE_UNAVAILABLE = 503 -GATEWAY_TIMEOUT = 504 -HTTP_VERSION_NOT_SUPPORTED = 505 -INSUFFICIENT_STORAGE = 507 -NOT_EXTENDED = 510 -NETWORK_AUTHENTICATION_REQUIRED = 511 - -# Mapping status codes to official W3C names -responses = { - 100: 'Continue', - 101: 'Switching Protocols', - - 200: 'OK', - 201: 'Created', - 202: 'Accepted', - 203: 'Non-Authoritative Information', - 204: 'No Content', - 205: 'Reset Content', - 206: 'Partial Content', - - 300: 'Multiple Choices', - 301: 'Moved Permanently', - 302: 'Found', - 303: 'See Other', - 304: 'Not Modified', - 305: 'Use Proxy', - 306: '(Unused)', - 307: 'Temporary Redirect', - - 400: 'Bad Request', - 401: 'Unauthorized', - 402: 'Payment Required', - 403: 'Forbidden', - 404: 'Not Found', - 405: 'Method Not Allowed', - 406: 'Not Acceptable', - 407: 'Proxy Authentication Required', - 408: 'Request Timeout', - 409: 'Conflict', - 410: 'Gone', - 411: 'Length Required', - 412: 'Precondition Failed', - 413: 'Request Entity Too Large', - 414: 'Request-URI Too 
Long', - 415: 'Unsupported Media Type', - 416: 'Requested Range Not Satisfiable', - 417: 'Expectation Failed', - 428: 'Precondition Required', - 429: 'Too Many Requests', - 431: 'Request Header Fields Too Large', - - 500: 'Internal Server Error', - 501: 'Not Implemented', - 502: 'Bad Gateway', - 503: 'Service Unavailable', - 504: 'Gateway Timeout', - 505: 'HTTP Version Not Supported', - 511: 'Network Authentication Required', -} - -# maximal amount of data to read at one time in _safe_read -MAXAMOUNT = 1048576 - -# maximal line length when calling readline(). -_MAXLINE = 65536 -_MAXHEADERS = 100 - - -class HTTPMessage(email_message.Message): - # XXX The only usage of this method is in - # http.server.CGIHTTPRequestHandler. Maybe move the code there so - # that it doesn't need to be part of the public API. The API has - # never been defined so this could cause backwards compatibility - # issues. - - def getallmatchingheaders(self, name): - """Find all header lines matching a given header name. - - Look through the list of headers and find all lines matching a given - header name (and their continuation lines). A list of the lines is - returned, without interpretation. If the header does not occur, an - empty list is returned. If the header occurs multiple times, all - occurrences are returned. Case is not important in the header name. - - """ - name = name.lower() + ':' - n = len(name) - lst = [] - hit = 0 - for line in self.keys(): - if line[:n].lower() == name: - hit = 1 - elif not line[:1].isspace(): - hit = 0 - if hit: - lst.append(line) - return lst - -def parse_headers(fp, _class=HTTPMessage): - """Parses only RFC2822 headers from a file pointer. - - email Parser wants to see strings rather than bytes. - But a TextIOWrapper around self.rfile would buffer too many bytes - from the stream, bytes which we later need to read as bytes. - So we read the correct bytes here, as bytes, for email Parser - to parse. - - """ - headers = [] - while True: - line = fp.readline(_MAXLINE + 1) - if len(line) > _MAXLINE: - raise LineTooLong("header line") - headers.append(line) - if len(headers) > _MAXHEADERS: - raise HTTPException("got more than %d headers" % _MAXHEADERS) - if line in (b'\r\n', b'\n', b''): - break - hstring = bytes(b'').join(headers).decode('iso-8859-1') - return email_parser.Parser(_class=_class).parsestr(hstring) - - -_strict_sentinel = object() - -class HTTPResponse(io.RawIOBase): - - # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. - - # The bytes from the socket object are iso-8859-1 strings. - # See RFC 2616 sec 2.2 which notes an exception for MIME-encoded - # text following RFC 2047. The basic status line parsing only - # accepts iso-8859-1. - - def __init__(self, sock, debuglevel=0, strict=_strict_sentinel, method=None, url=None): - # If the response includes a content-length header, we need to - # make sure that the client doesn't read more than the - # specified number of bytes. If it does, it will block until - # the server times out and closes the connection. This will - # happen if a self.fp.read() is done (without a size) whether - # self.fp is buffered or not. So, no self.fp.read() by - # clients unless they know what they are doing. - self.fp = sock.makefile("rb") - self.debuglevel = debuglevel - if strict is not _strict_sentinel: - warnings.warn("the 'strict' argument isn't supported anymore; " - "http.client now always assumes HTTP/1.x compliant servers.", - DeprecationWarning, 2) - self._method = method - - # The HTTPResponse object is returned via urllib. 
The clients - # of http and urllib expect different attributes for the - # headers. headers is used here and supports urllib. msg is - # provided as a backwards compatibility layer for http - # clients. - - self.headers = self.msg = None - - # from the Status-Line of the response - self.version = _UNKNOWN # HTTP-Version - self.status = _UNKNOWN # Status-Code - self.reason = _UNKNOWN # Reason-Phrase - - self.chunked = _UNKNOWN # is "chunked" being used? - self.chunk_left = _UNKNOWN # bytes left to read in current chunk - self.length = _UNKNOWN # number of bytes left in response - self.will_close = _UNKNOWN # conn will close at end of response - - def _read_status(self): - line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1") - if len(line) > _MAXLINE: - raise LineTooLong("status line") - if self.debuglevel > 0: - print("reply:", repr(line)) - if not line: - # Presumably, the server closed the connection before - # sending a valid response. - raise BadStatusLine(line) - try: - version, status, reason = line.split(None, 2) - except ValueError: - try: - version, status = line.split(None, 1) - reason = "" - except ValueError: - # empty version will cause next test to fail. - version = "" - if not version.startswith("HTTP/"): - self._close_conn() - raise BadStatusLine(line) - - # The status code is a three-digit number - try: - status = int(status) - if status < 100 or status > 999: - raise BadStatusLine(line) - except ValueError: - raise BadStatusLine(line) - return version, status, reason - - def begin(self): - if self.headers is not None: - # we've already started reading the response - return - - # read until we get a non-100 response - while True: - version, status, reason = self._read_status() - if status != CONTINUE: - break - # skip the header from the 100 response - while True: - skip = self.fp.readline(_MAXLINE + 1) - if len(skip) > _MAXLINE: - raise LineTooLong("header line") - skip = skip.strip() - if not skip: - break - if self.debuglevel > 0: - print("header:", skip) - - self.code = self.status = status - self.reason = reason.strip() - if version in ("HTTP/1.0", "HTTP/0.9"): - # Some servers might still return "0.9", treat it as 1.0 anyway - self.version = 10 - elif version.startswith("HTTP/1."): - self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 - else: - raise UnknownProtocol(version) - - self.headers = self.msg = parse_headers(self.fp) - - if self.debuglevel > 0: - for hdr in self.headers: - print("header:", hdr, end=" ") - - # are we using the chunked-style of transfer encoding? - tr_enc = self.headers.get("transfer-encoding") - if tr_enc and tr_enc.lower() == "chunked": - self.chunked = True - self.chunk_left = None - else: - self.chunked = False - - # will the connection close at the end of the response? - self.will_close = self._check_close() - - # do we have a Content-Length? - # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" - self.length = None - length = self.headers.get("content-length") - - # are we using the chunked-style of transfer encoding? - tr_enc = self.headers.get("transfer-encoding") - if length and not self.chunked: - try: - self.length = int(length) - except ValueError: - self.length = None - else: - if self.length < 0: # ignore nonsensical negative lengths - self.length = None - else: - self.length = None - - # does the body have a fixed length? 
(of zero) - if (status == NO_CONTENT or status == NOT_MODIFIED or - 100 <= status < 200 or # 1xx codes - self._method == "HEAD"): - self.length = 0 - - # if the connection remains open, and we aren't using chunked, and - # a content-length was not provided, then assume that the connection - # WILL close. - if (not self.will_close and - not self.chunked and - self.length is None): - self.will_close = True - - def _check_close(self): - conn = self.headers.get("connection") - if self.version == 11: - # An HTTP/1.1 proxy is assumed to stay open unless - # explicitly closed. - conn = self.headers.get("connection") - if conn and "close" in conn.lower(): - return True - return False - - # Some HTTP/1.0 implementations have support for persistent - # connections, using rules different than HTTP/1.1. - - # For older HTTP, Keep-Alive indicates persistent connection. - if self.headers.get("keep-alive"): - return False - - # At least Akamai returns a "Connection: Keep-Alive" header, - # which was supposed to be sent by the client. - if conn and "keep-alive" in conn.lower(): - return False - - # Proxy-Connection is a netscape hack. - pconn = self.headers.get("proxy-connection") - if pconn and "keep-alive" in pconn.lower(): - return False - - # otherwise, assume it will close - return True - - def _close_conn(self): - fp = self.fp - self.fp = None - fp.close() - - def close(self): - super().close() # set "closed" flag - if self.fp: - self._close_conn() - - # These implementations are for the benefit of io.BufferedReader. - - # XXX This class should probably be revised to act more like - # the "raw stream" that BufferedReader expects. - - def flush(self): - super().flush() - if self.fp: - self.fp.flush() - - def readable(self): - return True - - # End of "raw stream" methods - - def isclosed(self): - """True if the connection is closed.""" - # NOTE: it is possible that we will not ever call self.close(). This - # case occurs when will_close is TRUE, length is None, and we - # read up to the last byte, but NOT past it. - # - # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be - # called, meaning self.isclosed() is meaningful. 
- return self.fp is None - - def read(self, amt=None): - if self.fp is None: - return bytes(b"") - - if self._method == "HEAD": - self._close_conn() - return bytes(b"") - - if amt is not None: - # Amount is given, so call base class version - # (which is implemented in terms of self.readinto) - return bytes(super(HTTPResponse, self).read(amt)) - else: - # Amount is not given (unbounded read) so we must check self.length - # and self.chunked - - if self.chunked: - return self._readall_chunked() - - if self.length is None: - s = self.fp.read() - else: - try: - s = self._safe_read(self.length) - except IncompleteRead: - self._close_conn() - raise - self.length = 0 - self._close_conn() # we read everything - return bytes(s) - - def readinto(self, b): - if self.fp is None: - return 0 - - if self._method == "HEAD": - self._close_conn() - return 0 - - if self.chunked: - return self._readinto_chunked(b) - - if self.length is not None: - if len(b) > self.length: - # clip the read to the "end of response" - b = memoryview(b)[0:self.length] - - # we do not use _safe_read() here because this may be a .will_close - # connection, and the user is reading more bytes than will be provided - # (for example, reading in 1k chunks) - - if PY2: - data = self.fp.read(len(b)) - n = len(data) - b[:n] = data - else: - n = self.fp.readinto(b) - - if not n and b: - # Ideally, we would raise IncompleteRead if the content-length - # wasn't satisfied, but it might break compatibility. - self._close_conn() - elif self.length is not None: - self.length -= n - if not self.length: - self._close_conn() - return n - - def _read_next_chunk_size(self): - # Read the next chunk size from the file - line = self.fp.readline(_MAXLINE + 1) - if len(line) > _MAXLINE: - raise LineTooLong("chunk size") - i = line.find(b";") - if i >= 0: - line = line[:i] # strip chunk-extensions - try: - return int(line, 16) - except ValueError: - # close the connection as protocol synchronisation is - # probably lost - self._close_conn() - raise - - def _read_and_discard_trailer(self): - # read and discard trailer up to the CRLF terminator - ### note: we shouldn't have any trailers! 
- while True: - line = self.fp.readline(_MAXLINE + 1) - if len(line) > _MAXLINE: - raise LineTooLong("trailer line") - if not line: - # a vanishingly small number of sites EOF without - # sending the trailer - break - if line in (b'\r\n', b'\n', b''): - break - - def _readall_chunked(self): - assert self.chunked != _UNKNOWN - chunk_left = self.chunk_left - value = [] - while True: - if chunk_left is None: - try: - chunk_left = self._read_next_chunk_size() - if chunk_left == 0: - break - except ValueError: - raise IncompleteRead(bytes(b'').join(value)) - value.append(self._safe_read(chunk_left)) - - # we read the whole chunk, get another - self._safe_read(2) # toss the CRLF at the end of the chunk - chunk_left = None - - self._read_and_discard_trailer() - - # we read everything; close the "file" - self._close_conn() - - return bytes(b'').join(value) - - def _readinto_chunked(self, b): - assert self.chunked != _UNKNOWN - chunk_left = self.chunk_left - - total_bytes = 0 - mvb = memoryview(b) - while True: - if chunk_left is None: - try: - chunk_left = self._read_next_chunk_size() - if chunk_left == 0: - break - except ValueError: - raise IncompleteRead(bytes(b[0:total_bytes])) - - if len(mvb) < chunk_left: - n = self._safe_readinto(mvb) - self.chunk_left = chunk_left - n - return total_bytes + n - elif len(mvb) == chunk_left: - n = self._safe_readinto(mvb) - self._safe_read(2) # toss the CRLF at the end of the chunk - self.chunk_left = None - return total_bytes + n - else: - temp_mvb = mvb[0:chunk_left] - n = self._safe_readinto(temp_mvb) - mvb = mvb[n:] - total_bytes += n - - # we read the whole chunk, get another - self._safe_read(2) # toss the CRLF at the end of the chunk - chunk_left = None - - self._read_and_discard_trailer() - - # we read everything; close the "file" - self._close_conn() - - return total_bytes - - def _safe_read(self, amt): - """Read the number of bytes requested, compensating for partial reads. - - Normally, we have a blocking socket, but a read() can be interrupted - by a signal (resulting in a partial read). - - Note that we cannot distinguish between EOF and an interrupt when zero - bytes have been read. IncompleteRead() will be raised in this - situation. - - This function should be used when bytes "should" be present for - reading. If the bytes are truly not available (due to EOF), then the - IncompleteRead exception can be used to detect the problem. 
- """ - s = [] - while amt > 0: - chunk = self.fp.read(min(amt, MAXAMOUNT)) - if not chunk: - raise IncompleteRead(bytes(b'').join(s), amt) - s.append(chunk) - amt -= len(chunk) - return bytes(b"").join(s) - - def _safe_readinto(self, b): - """Same as _safe_read, but for reading into a buffer.""" - total_bytes = 0 - mvb = memoryview(b) - while total_bytes < len(b): - if MAXAMOUNT < len(mvb): - temp_mvb = mvb[0:MAXAMOUNT] - if PY2: - data = self.fp.read(len(temp_mvb)) - n = len(data) - temp_mvb[:n] = data - else: - n = self.fp.readinto(temp_mvb) - else: - if PY2: - data = self.fp.read(len(mvb)) - n = len(data) - mvb[:n] = data - else: - n = self.fp.readinto(mvb) - if not n: - raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b)) - mvb = mvb[n:] - total_bytes += n - return total_bytes - - def fileno(self): - return self.fp.fileno() - - def getheader(self, name, default=None): - if self.headers is None: - raise ResponseNotReady() - headers = self.headers.get_all(name) or default - if isinstance(headers, str) or not hasattr(headers, '__iter__'): - return headers - else: - return ', '.join(headers) - - def getheaders(self): - """Return list of (header, value) tuples.""" - if self.headers is None: - raise ResponseNotReady() - return list(self.headers.items()) - - # We override IOBase.__iter__ so that it doesn't check for closed-ness - - def __iter__(self): - return self - - # For compatibility with old-style urllib responses. - - def info(self): - return self.headers - - def geturl(self): - return self.url - - def getcode(self): - return self.status - -class HTTPConnection(object): - - _http_vsn = 11 - _http_vsn_str = 'HTTP/1.1' - - response_class = HTTPResponse - default_port = HTTP_PORT - auto_open = 1 - debuglevel = 0 - - def __init__(self, host, port=None, strict=_strict_sentinel, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): - if strict is not _strict_sentinel: - warnings.warn("the 'strict' argument isn't supported anymore; " - "http.client now always assumes HTTP/1.x compliant servers.", - DeprecationWarning, 2) - self.timeout = timeout - self.source_address = source_address - self.sock = None - self._buffer = [] - self.__response = None - self.__state = _CS_IDLE - self._method = None - self._tunnel_host = None - self._tunnel_port = None - self._tunnel_headers = {} - - self._set_hostport(host, port) - - def set_tunnel(self, host, port=None, headers=None): - """ Sets up the host and the port for the HTTP CONNECT Tunnelling. - - The headers argument should be a mapping of extra HTTP headers - to send with the CONNECT request. - """ - self._tunnel_host = host - self._tunnel_port = port - if headers: - self._tunnel_headers = headers - else: - self._tunnel_headers.clear() - - def _set_hostport(self, host, port): - if port is None: - i = host.rfind(':') - j = host.rfind(']') # ipv6 addresses have [...] 
- if i > j: - try: - port = int(host[i+1:]) - except ValueError: - if host[i+1:] == "": # http://foo.com:/ == http://foo.com/ - port = self.default_port - else: - raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) - host = host[:i] - else: - port = self.default_port - if host and host[0] == '[' and host[-1] == ']': - host = host[1:-1] - self.host = host - self.port = port - - def set_debuglevel(self, level): - self.debuglevel = level - - def _tunnel(self): - self._set_hostport(self._tunnel_host, self._tunnel_port) - connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port) - connect_bytes = connect_str.encode("ascii") - self.send(connect_bytes) - for header, value in self._tunnel_headers.items(): - header_str = "%s: %s\r\n" % (header, value) - header_bytes = header_str.encode("latin-1") - self.send(header_bytes) - self.send(bytes(b'\r\n')) - - response = self.response_class(self.sock, method=self._method) - (version, code, message) = response._read_status() - - if code != 200: - self.close() - raise socket.error("Tunnel connection failed: %d %s" % (code, - message.strip())) - while True: - line = response.fp.readline(_MAXLINE + 1) - if len(line) > _MAXLINE: - raise LineTooLong("header line") - if not line: - # for sites which EOF without sending a trailer - break - if line in (b'\r\n', b'\n', b''): - break - - def connect(self): - """Connect to the host and port specified in __init__.""" - self.sock = socket_create_connection((self.host,self.port), - self.timeout, self.source_address) - if self._tunnel_host: - self._tunnel() - - def close(self): - """Close the connection to the HTTP server.""" - if self.sock: - self.sock.close() # close it manually... there may be other refs - self.sock = None - if self.__response: - self.__response.close() - self.__response = None - self.__state = _CS_IDLE - - def send(self, data): - """Send `data' to the server. - ``data`` can be a string object, a bytes object, an array object, a - file-like object that supports a .read() method, or an iterable object. - """ - - if self.sock is None: - if self.auto_open: - self.connect() - else: - raise NotConnected() - - if self.debuglevel > 0: - print("send:", repr(data)) - blocksize = 8192 - # Python 2.7 array objects have a read method which is incompatible - # with the 2-arg calling syntax below. - if hasattr(data, "read") and not isinstance(data, array): - if self.debuglevel > 0: - print("sendIng a read()able") - encode = False - try: - mode = data.mode - except AttributeError: - # io.BytesIO and other file-like objects don't have a `mode` - # attribute. - pass - else: - if "b" not in mode: - encode = True - if self.debuglevel > 0: - print("encoding file using iso-8859-1") - while 1: - datablock = data.read(blocksize) - if not datablock: - break - if encode: - datablock = datablock.encode("iso-8859-1") - self.sock.sendall(datablock) - return - try: - self.sock.sendall(data) - except TypeError: - if isinstance(data, Iterable): - for d in data: - self.sock.sendall(d) - else: - raise TypeError("data should be a bytes-like object " - "or an iterable, got %r" % type(data)) - - def _output(self, s): - """Add a line of output to the current request buffer. - - Assumes that the line does *not* end with \\r\\n. - """ - self._buffer.append(s) - - def _send_output(self, message_body=None): - """Send the currently buffered request and clear the buffer. - - Appends an extra \\r\\n to the buffer. - A message_body may be specified, to be appended to the request. 
- """ - self._buffer.extend((bytes(b""), bytes(b""))) - msg = bytes(b"\r\n").join(self._buffer) - del self._buffer[:] - # If msg and message_body are sent in a single send() call, - # it will avoid performance problems caused by the interaction - # between delayed ack and the Nagle algorithm. - if isinstance(message_body, bytes): - msg += message_body - message_body = None - self.send(msg) - if message_body is not None: - # message_body was not a string (i.e. it is a file), and - # we must run the risk of Nagle. - self.send(message_body) - - def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): - """Send a request to the server. - - `method' specifies an HTTP request method, e.g. 'GET'. - `url' specifies the object being requested, e.g. '/index.html'. - `skip_host' if True does not add automatically a 'Host:' header - `skip_accept_encoding' if True does not add automatically an - 'Accept-Encoding:' header - """ - - # if a prior response has been completed, then forget about it. - if self.__response and self.__response.isclosed(): - self.__response = None - - - # in certain cases, we cannot issue another request on this connection. - # this occurs when: - # 1) we are in the process of sending a request. (_CS_REQ_STARTED) - # 2) a response to a previous request has signalled that it is going - # to close the connection upon completion. - # 3) the headers for the previous response have not been read, thus - # we cannot determine whether point (2) is true. (_CS_REQ_SENT) - # - # if there is no prior response, then we can request at will. - # - # if point (2) is true, then we will have passed the socket to the - # response (effectively meaning, "there is no prior response"), and - # will open a new one when a new request is made. - # - # Note: if a prior response exists, then we *can* start a new request. - # We are not allowed to begin fetching the response to this new - # request, however, until that prior response is complete. - # - if self.__state == _CS_IDLE: - self.__state = _CS_REQ_STARTED - else: - raise CannotSendRequest(self.__state) - - # Save the method we use, we need it later in the response phase - self._method = method - if not url: - url = '/' - request = '%s %s %s' % (method, url, self._http_vsn_str) - - # Non-ASCII characters should have been eliminated earlier - self._output(request.encode('ascii')) - - if self._http_vsn == 11: - # Issue some standard headers for better HTTP/1.1 compliance - - if not skip_host: - # this header is issued *only* for HTTP/1.1 - # connections. more specifically, this means it is - # only issued when the client uses the new - # HTTPConnection() class. backwards-compat clients - # will be using HTTP/1.0 and those clients may be - # issuing this header themselves. we should NOT issue - # it twice; some web servers (such as Apache) barf - # when they see two Host: headers - - # If we need a non-standard port,include it in the - # header. If the request is going through a proxy, - # but the host of the actual URL, not the host of the - # proxy. 
- - netloc = '' - if url.startswith('http'): - nil, netloc, nil, nil, nil = urlsplit(url) - - if netloc: - try: - netloc_enc = netloc.encode("ascii") - except UnicodeEncodeError: - netloc_enc = netloc.encode("idna") - self.putheader('Host', netloc_enc) - else: - try: - host_enc = self.host.encode("ascii") - except UnicodeEncodeError: - host_enc = self.host.encode("idna") - - # As per RFC 273, IPv6 address should be wrapped with [] - # when used as Host header - - if self.host.find(':') >= 0: - host_enc = bytes(b'[' + host_enc + b']') - - if self.port == self.default_port: - self.putheader('Host', host_enc) - else: - host_enc = host_enc.decode("ascii") - self.putheader('Host', "%s:%s" % (host_enc, self.port)) - - # note: we are assuming that clients will not attempt to set these - # headers since *this* library must deal with the - # consequences. this also means that when the supporting - # libraries are updated to recognize other forms, then this - # code should be changed (removed or updated). - - # we only want a Content-Encoding of "identity" since we don't - # support encodings such as x-gzip or x-deflate. - if not skip_accept_encoding: - self.putheader('Accept-Encoding', 'identity') - - # we can accept "chunked" Transfer-Encodings, but no others - # NOTE: no TE header implies *only* "chunked" - #self.putheader('TE', 'chunked') - - # if TE is supplied in the header, then it must appear in a - # Connection header. - #self.putheader('Connection', 'TE') - - else: - # For HTTP/1.0, the server will assume "not chunked" - pass - - def putheader(self, header, *values): - """Send a request header line to the server. - - For example: h.putheader('Accept', 'text/html') - """ - if self.__state != _CS_REQ_STARTED: - raise CannotSendHeader() - - if hasattr(header, 'encode'): - header = header.encode('ascii') - values = list(values) - for i, one_value in enumerate(values): - if hasattr(one_value, 'encode'): - values[i] = one_value.encode('latin-1') - elif isinstance(one_value, int): - values[i] = str(one_value).encode('ascii') - value = bytes(b'\r\n\t').join(values) - header = header + bytes(b': ') + value - self._output(header) - - def endheaders(self, message_body=None): - """Indicate that the last header line has been sent to the server. - - This method sends the request to the server. The optional message_body - argument can be used to pass a message body associated with the - request. The message body will be sent in the same packet as the - message headers if it is a string, otherwise it is sent as a separate - packet. - """ - if self.__state == _CS_REQ_STARTED: - self.__state = _CS_REQ_SENT - else: - raise CannotSendHeader() - self._send_output(message_body) - - def request(self, method, url, body=None, headers={}): - """Send a complete request to the server.""" - self._send_request(method, url, body, headers) - - def _set_content_length(self, body): - # Set the content-length based on the body. - thelen = None - try: - thelen = str(len(body)) - except TypeError as te: - # If this is a file-like object, try to - # fstat its file descriptor - try: - thelen = str(os.fstat(body.fileno()).st_size) - except (AttributeError, OSError): - # Don't send a length if this failed - if self.debuglevel > 0: print("Cannot stat!!") - - if thelen is not None: - self.putheader('Content-Length', thelen) - - def _send_request(self, method, url, body, headers): - # Honor explicitly requested Host: and Accept-Encoding: headers. 
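# A minimal sketch of the Host header rules putrequest applies above:
# IDNA-encode non-ASCII hostnames, bracket bare IPv6 literals, and append
# the port only when it differs from the scheme default.
# `host_header_value` is a hypothetical helper name.
def host_header_value(host, port, default_port=80):
    try:
        host_enc = host.encode("ascii")
    except UnicodeEncodeError:
        host_enc = host.encode("idna")  # internationalized domain name
    if ":" in host:  # bare IPv6 literal
        host_enc = b"[" + host_enc + b"]"
    if port == default_port:
        return host_enc.decode("ascii")
    return "%s:%s" % (host_enc.decode("ascii"), port)

assert host_header_value("example.com", 80) == "example.com"
assert host_header_value("::1", 8080) == "[::1]:8080"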
- header_names = dict.fromkeys([k.lower() for k in headers]) - skips = {} - if 'host' in header_names: - skips['skip_host'] = 1 - if 'accept-encoding' in header_names: - skips['skip_accept_encoding'] = 1 - - self.putrequest(method, url, **skips) - - if body is not None and ('content-length' not in header_names): - self._set_content_length(body) - for hdr, value in headers.items(): - self.putheader(hdr, value) - if isinstance(body, str): - # RFC 2616 Section 3.7.1 says that text default has a - # default charset of iso-8859-1. - body = body.encode('iso-8859-1') - self.endheaders(body) - - def getresponse(self): - """Get the response from the server. - - If the HTTPConnection is in the correct state, returns an - instance of HTTPResponse or of whatever object is returned by - class the response_class variable. - - If a request has not been sent or if a previous response has - not be handled, ResponseNotReady is raised. If the HTTP - response indicates that the connection should be closed, then - it will be closed before the response is returned. When the - connection is closed, the underlying socket is closed. - """ - - # if a prior response has been completed, then forget about it. - if self.__response and self.__response.isclosed(): - self.__response = None - - # if a prior response exists, then it must be completed (otherwise, we - # cannot read this response's header to determine the connection-close - # behavior) - # - # note: if a prior response existed, but was connection-close, then the - # socket and response were made independent of this HTTPConnection - # object since a new request requires that we open a whole new - # connection - # - # this means the prior response had one of two states: - # 1) will_close: this connection was reset and the prior socket and - # response operate independently - # 2) persistent: the response was retained and we await its - # isclosed() status to become true. - # - if self.__state != _CS_REQ_SENT or self.__response: - raise ResponseNotReady(self.__state) - - if self.debuglevel > 0: - response = self.response_class(self.sock, self.debuglevel, - method=self._method) - else: - response = self.response_class(self.sock, method=self._method) - - response.begin() - assert response.will_close != _UNKNOWN - self.__state = _CS_IDLE - - if response.will_close: - # this effectively passes the connection to the response - self.close() - else: - # remember this, so we can tell when it is complete - self.__response = response - - return response - -try: - import ssl - from ssl import SSLContext -except ImportError: - pass -else: - class HTTPSConnection(HTTPConnection): - "This class allows communication via SSL." - - default_port = HTTPS_PORT - - # XXX Should key_file and cert_file be deprecated in favour of context? 
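# A usage sketch of the request/response ordering getresponse enforces
# above, written against the Python 3 stdlib equivalent of this backport
# (requires network access to the example host).
from http.client import HTTPConnection

conn = HTTPConnection("example.com", 80, timeout=10)
conn.request("GET", "/")      # putrequest + putheader + endheaders
resp = conn.getresponse()     # raises ResponseNotReady if mis-ordered
body = resp.read()            # consume fully before the next request
conn.request("GET", "/")      # the connection is reusable only now
conn.getresponse().read()
conn.close()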
- - def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=_strict_sentinel, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, **_3to2kwargs): - if 'check_hostname' in _3to2kwargs: check_hostname = _3to2kwargs['check_hostname']; del _3to2kwargs['check_hostname'] - else: check_hostname = None - if 'context' in _3to2kwargs: context = _3to2kwargs['context']; del _3to2kwargs['context'] - else: context = None - super(HTTPSConnection, self).__init__(host, port, strict, timeout, - source_address) - self.key_file = key_file - self.cert_file = cert_file - if context is None: - # Some reasonable defaults - context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) - context.options |= ssl.OP_NO_SSLv2 - will_verify = context.verify_mode != ssl.CERT_NONE - if check_hostname is None: - check_hostname = will_verify - elif check_hostname and not will_verify: - raise ValueError("check_hostname needs a SSL context with " - "either CERT_OPTIONAL or CERT_REQUIRED") - if key_file or cert_file: - context.load_cert_chain(cert_file, key_file) - self._context = context - self._check_hostname = check_hostname - - def connect(self): - "Connect to a host on a given (SSL) port." - - sock = socket_create_connection((self.host, self.port), - self.timeout, self.source_address) - - if self._tunnel_host: - self.sock = sock - self._tunnel() - - server_hostname = self.host if ssl.HAS_SNI else None - self.sock = self._context.wrap_socket(sock, - server_hostname=server_hostname) - try: - if self._check_hostname: - ssl.match_hostname(self.sock.getpeercert(), self.host) - except Exception: - self.sock.shutdown(socket.SHUT_RDWR) - self.sock.close() - raise - - __all__.append("HTTPSConnection") - - - # ###################################### - # # We use the old HTTPSConnection class from Py2.7, because ssl.SSLContext - # # doesn't exist in the Py2.7 stdlib - # class HTTPSConnection(HTTPConnection): - # "This class allows communication via SSL." - - # default_port = HTTPS_PORT - - # def __init__(self, host, port=None, key_file=None, cert_file=None, - # strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - # source_address=None): - # HTTPConnection.__init__(self, host, port, strict, timeout, - # source_address) - # self.key_file = key_file - # self.cert_file = cert_file - - # def connect(self): - # "Connect to a host on a given (SSL) port." - - # sock = socket_create_connection((self.host, self.port), - # self.timeout, self.source_address) - # if self._tunnel_host: - # self.sock = sock - # self._tunnel() - # self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file) - - # __all__.append("HTTPSConnection") - # ###################################### - - -class HTTPException(Exception): - # Subclasses that define an __init__ must call Exception.__init__ - # or define self.args. Otherwise, str() will fail. 
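# A sketch of the verification defaults the HTTPSConnection constructor
# above wires up by hand, expressed with the modern stdlib helper.
import ssl

context = ssl.create_default_context()  # CERT_REQUIRED + check_hostname
# The consistency rule enforced above: hostname checking is only
# meaningful when certificate verification is enabled.
assert context.verify_mode != ssl.CERT_NONE
assert context.check_hostname
# Such a context would then be passed as, e.g.:
#   HTTPSConnection("example.com", 443, context=context)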
- pass - -class NotConnected(HTTPException): - pass - -class InvalidURL(HTTPException): - pass - -class UnknownProtocol(HTTPException): - def __init__(self, version): - self.args = version, - self.version = version - -class UnknownTransferEncoding(HTTPException): - pass - -class UnimplementedFileMode(HTTPException): - pass - -class IncompleteRead(HTTPException): - def __init__(self, partial, expected=None): - self.args = partial, - self.partial = partial - self.expected = expected - def __repr__(self): - if self.expected is not None: - e = ', %i more expected' % self.expected - else: - e = '' - return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e) - def __str__(self): - return repr(self) - -class ImproperConnectionState(HTTPException): - pass - -class CannotSendRequest(ImproperConnectionState): - pass - -class CannotSendHeader(ImproperConnectionState): - pass - -class ResponseNotReady(ImproperConnectionState): - pass - -class BadStatusLine(HTTPException): - def __init__(self, line): - if not line: - line = repr(line) - self.args = line, - self.line = line - -class LineTooLong(HTTPException): - def __init__(self, line_type): - HTTPException.__init__(self, "got more than %d bytes when reading %s" - % (_MAXLINE, line_type)) - -# for backwards compatibility -error = HTTPException diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookiejar.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookiejar.py deleted file mode 100755 index af3ef41..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookiejar.py +++ /dev/null @@ -1,2110 +0,0 @@ -r"""HTTP cookie handling for web clients. - -This is a backport of the Py3.3 ``http.cookiejar`` module for -python-future. - -This module has (now fairly distant) origins in Gisle Aas' Perl module -HTTP::Cookies, from the libwww-perl library. - -Docstrings, comments and debug strings in this code refer to the -attributes of the HTTP cookie system as cookie-attributes, to distinguish -them clearly from Python attributes. 
- -Class diagram (note that BSDDBCookieJar and the MSIE* classes are not -distributed with the Python standard library, but are available from -http://wwwsearch.sf.net/): - - CookieJar____ - / \ \ - FileCookieJar \ \ - / | \ \ \ - MozillaCookieJar | LWPCookieJar \ \ - | | \ - | ---MSIEBase | \ - | / | | \ - | / MSIEDBCookieJar BSDDBCookieJar - |/ - MSIECookieJar - -""" - -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -from future.builtins import filter, int, map, open, str -from future.utils import as_native_str, PY2 - -__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy', - 'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar'] - -import copy -import datetime -import re -if PY2: - re.ASCII = 0 -import time -from future.backports.urllib.parse import urlparse, urlsplit, quote -from future.backports.http.client import HTTP_PORT -try: - import threading as _threading -except ImportError: - import dummy_threading as _threading -from calendar import timegm - -debug = False # set to True to enable debugging via the logging module -logger = None - -def _debug(*args): - if not debug: - return - global logger - if not logger: - import logging - logger = logging.getLogger("http.cookiejar") - return logger.debug(*args) - - -DEFAULT_HTTP_PORT = str(HTTP_PORT) -MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar " - "instance initialised with one)") - -def _warn_unhandled_exception(): - # There are a few catch-all except: statements in this module, for - # catching input that's bad in unexpected ways. Warn if any - # exceptions are caught there. - import io, warnings, traceback - f = io.StringIO() - traceback.print_exc(None, f) - msg = f.getvalue() - warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2) - - -# Date/time conversion -# ----------------------------------------------------------------------------- - -EPOCH_YEAR = 1970 -def _timegm(tt): - year, month, mday, hour, min, sec = tt[:6] - if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and - (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)): - return timegm(tt) - else: - return None - -DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] -MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", - "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] -MONTHS_LOWER = [] -for month in MONTHS: MONTHS_LOWER.append(month.lower()) - -def time2isoz(t=None): - """Return a string representing time in seconds since epoch, t. - - If the function is called without an argument, it will use the current - time. - - The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ", - representing Universal Time (UTC, aka GMT). An example of this format is: - - 1994-11-24 08:49:37Z - - """ - if t is None: - dt = datetime.datetime.utcnow() - else: - dt = datetime.datetime.utcfromtimestamp(t) - return "%04d-%02d-%02d %02d:%02d:%02dZ" % ( - dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) - -def time2netscape(t=None): - """Return a string representing time in seconds since epoch, t. - - If the function is called without an argument, it will use the current - time. 
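# A worked example of the UTC conversions the helpers above wrap, using
# the docstring's sample instant, 1994-11-24 08:49:37Z.
import calendar
import datetime

tt = (1994, 11, 24, 8, 49, 37)
epoch = calendar.timegm(tt)  # inverse of time.gmtime, as in _timegm
assert epoch == 785666977
dt = datetime.datetime.utcfromtimestamp(epoch)
assert dt.strftime("%Y-%m-%d %H:%M:%SZ") == "1994-11-24 08:49:37Z"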
- - The format of the returned string is like this: - - Wed, DD-Mon-YYYY HH:MM:SS GMT - - """ - if t is None: - dt = datetime.datetime.utcnow() - else: - dt = datetime.datetime.utcfromtimestamp(t) - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( - DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1], - dt.year, dt.hour, dt.minute, dt.second) - - -UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None} - -TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII) -def offset_from_tz_string(tz): - offset = None - if tz in UTC_ZONES: - offset = 0 - else: - m = TIMEZONE_RE.search(tz) - if m: - offset = 3600 * int(m.group(2)) - if m.group(3): - offset = offset + 60 * int(m.group(3)) - if m.group(1) == '-': - offset = -offset - return offset - -def _str2time(day, mon, yr, hr, min, sec, tz): - # translate month name to number - # month numbers start with 1 (January) - try: - mon = MONTHS_LOWER.index(mon.lower())+1 - except ValueError: - # maybe it's already a number - try: - imon = int(mon) - except ValueError: - return None - if 1 <= imon <= 12: - mon = imon - else: - return None - - # make sure clock elements are defined - if hr is None: hr = 0 - if min is None: min = 0 - if sec is None: sec = 0 - - yr = int(yr) - day = int(day) - hr = int(hr) - min = int(min) - sec = int(sec) - - if yr < 1000: - # find "obvious" year - cur_yr = time.localtime(time.time())[0] - m = cur_yr % 100 - tmp = yr - yr = yr + cur_yr - m - m = m - tmp - if abs(m) > 50: - if m > 0: yr = yr + 100 - else: yr = yr - 100 - - # convert UTC time tuple to seconds since epoch (not timezone-adjusted) - t = _timegm((yr, mon, day, hr, min, sec, tz)) - - if t is not None: - # adjust time using timezone string, to get absolute time since epoch - if tz is None: - tz = "UTC" - tz = tz.upper() - offset = offset_from_tz_string(tz) - if offset is None: - return None - t = t - offset - - return t - -STRICT_DATE_RE = re.compile( - r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) " - "(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII) -WEEKDAY_RE = re.compile( - r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII) -LOOSE_HTTP_DATE_RE = re.compile( - r"""^ - (\d\d?) # day - (?:\s+|[-\/]) - (\w+) # month - (?:\s+|[-\/]) - (\d+) # year - (?: - (?:\s+|:) # separator before clock - (\d\d?):(\d\d) # hour:min - (?::(\d\d))? # optional seconds - )? # optional clock - \s* - ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone - \s* - (?:\(\w+\))? # ASCII representation of timezone in parens. - \s*$""", re.X | re.ASCII) -def http2time(text): - """Returns time in seconds since epoch of time represented by a string. - - Return value is an integer. - - None is returned if the format of str is unrecognized, the time is outside - the representable range, or the timezone string is not recognized. If the - string contains no timezone, UTC is assumed. - - The timezone in the string may be numerical (like "-0800" or "+0100") or a - string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the - timezone strings equivalent to UTC (zero offset) are known to the function. - - The function loosely parses the following formats: - - Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format - Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format - Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format - 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday) - 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday) - 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday) - - The parser ignores leading and trailing whitespace. 
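# A minimal sketch of the timezone arithmetic offset_from_tz_string
# performs above: "+HHMM" / "-HH:MM" strings become signed seconds.
# `tz_offset_seconds` is a hypothetical name.
import re

def tz_offset_seconds(tz):
    m = re.match(r"^([-+])?(\d\d?):?(\d\d)?$", tz)
    if not m:
        return None  # not a numeric offset this sketch understands
    offset = 3600 * int(m.group(2)) + 60 * int(m.group(3) or 0)
    return -offset if m.group(1) == "-" else offset

assert tz_offset_seconds("-0800") == -28800
assert tz_offset_seconds("+01:30") == 5400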
The time may be - absent. - - If the year is given with only 2 digits, the function will select the - century that makes the year closest to the current date. - - """ - # fast exit for strictly conforming string - m = STRICT_DATE_RE.search(text) - if m: - g = m.groups() - mon = MONTHS_LOWER.index(g[1].lower()) + 1 - tt = (int(g[2]), mon, int(g[0]), - int(g[3]), int(g[4]), float(g[5])) - return _timegm(tt) - - # No, we need some messy parsing... - - # clean up - text = text.lstrip() - text = WEEKDAY_RE.sub("", text, 1) # Useless weekday - - # tz is time zone specifier string - day, mon, yr, hr, min, sec, tz = [None]*7 - - # loose regexp parse - m = LOOSE_HTTP_DATE_RE.search(text) - if m is not None: - day, mon, yr, hr, min, sec, tz = m.groups() - else: - return None # bad format - - return _str2time(day, mon, yr, hr, min, sec, tz) - -ISO_DATE_RE = re.compile( - """^ - (\d{4}) # year - [-\/]? - (\d\d?) # numerical month - [-\/]? - (\d\d?) # day - (?: - (?:\s+|[-:Tt]) # separator before clock - (\d\d?):?(\d\d) # hour:min - (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional) - )? # optional clock - \s* - ([-+]?\d\d?:?(:?\d\d)? - |Z|z)? # timezone (Z is "zero meridian", i.e. GMT) - \s*$""", re.X | re.ASCII) -def iso2time(text): - """ - As for http2time, but parses the ISO 8601 formats: - - 1994-02-03 14:15:29 -0100 -- ISO 8601 format - 1994-02-03 14:15:29 -- zone is optional - 1994-02-03 -- only date - 1994-02-03T14:15:29 -- Use T as separator - 19940203T141529Z -- ISO 8601 compact format - 19940203 -- only date - - """ - # clean up - text = text.lstrip() - - # tz is time zone specifier string - day, mon, yr, hr, min, sec, tz = [None]*7 - - # loose regexp parse - m = ISO_DATE_RE.search(text) - if m is not None: - # XXX there's an extra bit of the timezone I'm ignoring here: is - # this the right thing to do? - yr, mon, day, hr, min, sec, tz, _ = m.groups() - else: - return None # bad format - - return _str2time(day, mon, yr, hr, min, sec, tz) - - -# Header parsing -# ----------------------------------------------------------------------------- - -def unmatched(match): - """Return unmatched part of re.Match object.""" - start, end = match.span(0) - return match.string[:start]+match.string[end:] - -HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)") -HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"") -HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)") -HEADER_ESCAPE_RE = re.compile(r"\\(.)") -def split_header_words(header_values): - r"""Parse header values into a list of lists containing key,value pairs. - - The function knows how to deal with ",", ";" and "=" as well as quoted - values after "=". A list of space separated tokens are parsed as if they - were separated by ";". - - If the header_values passed as argument contains multiple values, then they - are treated as if they were a single value separated by comma ",". - - This means that this function is useful for parsing header fields that - follow this syntax (BNF as from the HTTP/1.1 specification, but we relax - the requirement for tokens). - - headers = #header - header = (token | parameter) *( [";"] (token | parameter)) - - token = 1*<any CHAR except CTLs or separators> - separators = "(" | ")" | "<" | ">" | "@" - | "," | ";" | ":" | "\" | <"> - | "/" | "[" | "]" | "?"
| "=" - | "{" | "}" | SP | HT - - quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) - qdtext = <any TEXT except <">> - quoted-pair = "\" CHAR - - parameter = attribute "=" value - attribute = token - value = token | quoted-string - - Each header is represented by a list of key/value pairs. The value for a - simple token (not part of a parameter) is None. Syntactically incorrect - headers will not necessarily be parsed as you would want. - - This is easier to describe with some examples: - - >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz']) - [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]] - >>> split_header_words(['text/html; charset="iso-8859-1"']) - [[('text/html', None), ('charset', 'iso-8859-1')]] - >>> split_header_words([r'Basic realm="\"foo\bar\""']) - [[('Basic', None), ('realm', '"foobar"')]] - - """ - assert not isinstance(header_values, str) - result = [] - for text in header_values: - orig_text = text - pairs = [] - while text: - m = HEADER_TOKEN_RE.search(text) - if m: - text = unmatched(m) - name = m.group(1) - m = HEADER_QUOTED_VALUE_RE.search(text) - if m: # quoted value - text = unmatched(m) - value = m.group(1) - value = HEADER_ESCAPE_RE.sub(r"\1", value) - else: - m = HEADER_VALUE_RE.search(text) - if m: # unquoted value - text = unmatched(m) - value = m.group(1) - value = value.rstrip() - else: - # no value, a lone token - value = None - pairs.append((name, value)) - elif text.lstrip().startswith(","): - # concatenated headers, as per RFC 2616 section 4.2 - text = text.lstrip()[1:] - if pairs: result.append(pairs) - pairs = [] - else: - # skip junk - non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) - assert nr_junk_chars > 0, ( - "split_header_words bug: '%s', '%s', %s" % - (orig_text, text, pairs)) - text = non_junk - if pairs: result.append(pairs) - return result - -HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])") -def join_header_words(lists): - """Do the inverse (almost) of the conversion done by split_header_words. - - Takes a list of lists of (key, value) pairs and produces a single header - value. Attribute values are quoted if needed. - - >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]]) - 'text/plain; charset="iso-8859/1"' - >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]]) - 'text/plain, charset="iso-8859/1"' - - """ - headers = [] - for pairs in lists: - attr = [] - for k, v in pairs: - if v is not None: - if not re.search(r"^\w+$", v): - v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \ - v = '"%s"' % v - k = "%s=%s" % (k, v) - attr.append(k) - if attr: headers.append("; ".join(attr)) - return ", ".join(headers) - -def strip_quotes(text): - if text.startswith('"'): - text = text[1:] - if text.endswith('"'): - text = text[:-1] - return text - -def parse_ns_headers(ns_headers): - """Ad-hoc parser for Netscape protocol cookie-attributes. - - The old Netscape cookie format for Set-Cookie can for instance contain - an unquoted "," in the expires field, so we have to use this ad-hoc - parser instead of split_header_words. - - XXX This may not make the best possible effort to parse all the crap - that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient - parser is probably better, so could do worse than following that if - this ever gives any trouble. - - Currently, this is also used for parsing RFC 2109 cookies.
- - """ - known_attrs = ("expires", "domain", "path", "secure", - # RFC 2109 attrs (may turn up in Netscape cookies, too) - "version", "port", "max-age") - - result = [] - for ns_header in ns_headers: - pairs = [] - version_set = False - for ii, param in enumerate(re.split(r";\s*", ns_header)): - param = param.rstrip() - if param == "": continue - if "=" not in param: - k, v = param, None - else: - k, v = re.split(r"\s*=\s*", param, 1) - k = k.lstrip() - if ii != 0: - lc = k.lower() - if lc in known_attrs: - k = lc - if k == "version": - # This is an RFC 2109 cookie. - v = strip_quotes(v) - version_set = True - if k == "expires": - # convert expires date to seconds since epoch - v = http2time(strip_quotes(v)) # None if invalid - pairs.append((k, v)) - - if pairs: - if not version_set: - pairs.append(("version", "0")) - result.append(pairs) - - return result - - -IPV4_RE = re.compile(r"\.\d+$", re.ASCII) -def is_HDN(text): - """Return True if text is a host domain name.""" - # XXX - # This may well be wrong. Which RFC is HDN defined in, if any (for - # the purposes of RFC 2965)? - # For the current implementation, what about IPv6? Remember to look - # at other uses of IPV4_RE also, if change this. - if IPV4_RE.search(text): - return False - if text == "": - return False - if text[0] == "." or text[-1] == ".": - return False - return True - -def domain_match(A, B): - """Return True if domain A domain-matches domain B, according to RFC 2965. - - A and B may be host domain names or IP addresses. - - RFC 2965, section 1: - - Host names can be specified either as an IP address or a HDN string. - Sometimes we compare one host name with another. (Such comparisons SHALL - be case-insensitive.) Host A's name domain-matches host B's if - - * their host name strings string-compare equal; or - - * A is a HDN string and has the form NB, where N is a non-empty - name string, B has the form .B', and B' is a HDN string. (So, - x.y.com domain-matches .Y.com but not Y.com.) - - Note that domain-match is not a commutative operation: a.b.c.com - domain-matches .c.com, but not the reverse. - - """ - # Note that, if A or B are IP addresses, the only relevant part of the - # definition of the domain-match algorithm is the direct string-compare. - A = A.lower() - B = B.lower() - if A == B: - return True - if not is_HDN(A): - return False - i = A.rfind(B) - if i == -1 or i == 0: - # A does not have form NB, or N is the empty string - return False - if not B.startswith("."): - return False - if not is_HDN(B[1:]): - return False - return True - -def liberal_is_HDN(text): - """Return True if text is a sort-of-like a host domain name. - - For accepting/blocking domains. - - """ - if IPV4_RE.search(text): - return False - return True - -def user_domain_match(A, B): - """For blocking/accepting domains. - - A and B may be host domain names or IP addresses. - - """ - A = A.lower() - B = B.lower() - if not (liberal_is_HDN(A) and liberal_is_HDN(B)): - if A == B: - # equal IP addresses - return True - return False - initial_dot = B.startswith(".") - if initial_dot and A.endswith(B): - return True - if not initial_dot and A == B: - return True - return False - -cut_port_re = re.compile(r":\d+$", re.ASCII) -def request_host(request): - """Return request-host, as defined by RFC 2965. - - Variation from RFC: returned value is lowercased, for convenient - comparison. 
- - """ - url = request.get_full_url() - host = urlparse(url)[1] - if host == "": - host = request.get_header("Host", "") - - # remove port, if present - host = cut_port_re.sub("", host, 1) - return host.lower() - -def eff_request_host(request): - """Return a tuple (request-host, effective request-host name). - - As defined by RFC 2965, except both are lowercased. - - """ - erhn = req_host = request_host(request) - if req_host.find(".") == -1 and not IPV4_RE.search(req_host): - erhn = req_host + ".local" - return req_host, erhn - -def request_path(request): - """Path component of request-URI, as defined by RFC 2965.""" - url = request.get_full_url() - parts = urlsplit(url) - path = escape_path(parts.path) - if not path.startswith("/"): - # fix bad RFC 2396 absoluteURI - path = "/" + path - return path - -def request_port(request): - host = request.host - i = host.find(':') - if i >= 0: - port = host[i+1:] - try: - int(port) - except ValueError: - _debug("nonnumeric port: '%s'", port) - return None - else: - port = DEFAULT_HTTP_PORT - return port - -# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't -# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738). -HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()" -ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])") -def uppercase_escaped_char(match): - return "%%%s" % match.group(1).upper() -def escape_path(path): - """Escape any invalid characters in HTTP URL, and uppercase all escapes.""" - # There's no knowing what character encoding was used to create URLs - # containing %-escapes, but since we have to pick one to escape invalid - # path characters, we pick UTF-8, as recommended in the HTML 4.0 - # specification: - # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 - # And here, kind of: draft-fielding-uri-rfc2396bis-03 - # (And in draft IRI specification: draft-duerst-iri-05) - # (And here, for new URI schemes: RFC 2718) - path = quote(path, HTTP_PATH_SAFE) - path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) - return path - -def reach(h): - """Return reach of host h, as defined by RFC 2965, section 1. - - The reach R of a host name H is defined as follows: - - * If - - - H is the host domain name of a host; and, - - - H has the form A.B; and - - - A has no embedded (that is, interior) dots; and - - - B has at least one embedded dot, or B is the string "local". - then the reach of H is .B. - - * Otherwise, the reach of H is H. - - >>> reach("www.acme.com") - '.acme.com' - >>> reach("acme.com") - 'acme.com' - >>> reach("acme.local") - '.local' - - """ - i = h.find(".") - if i >= 0: - #a = h[:i] # this line is only here to show what a is - b = h[i+1:] - i = b.find(".") - if is_HDN(h) and (i >= 0 or b == "local"): - return "."+b - return h - -def is_third_party(request): - """ - - RFC 2965, section 3.3.6: - - An unverifiable transaction is to a third-party host if its request- - host U does not domain-match the reach R of the request-host O in the - origin transaction. - - """ - req_host = request_host(request) - if not domain_match(req_host, reach(request.get_origin_req_host())): - return True - else: - return False - - -class Cookie(object): - """HTTP Cookie. - - This class represents both Netscape and RFC 2965 cookies. - - This is deliberately a very simple class. It just holds attributes. It's - possible to construct Cookie instances that don't comply with the cookie - standards. 
CookieJar.make_cookies is the factory function for Cookie - objects -- it deals with cookie parsing, supplying defaults, and - normalising to the representation used in this class. CookiePolicy is - responsible for checking them to see whether they should be accepted from - and returned to the server. - - Note that the port may be present in the headers, but unspecified ("Port" - rather than "Port=80", for example); if this is the case, port is None. - - """ - - def __init__(self, version, name, value, - port, port_specified, - domain, domain_specified, domain_initial_dot, - path, path_specified, - secure, - expires, - discard, - comment, - comment_url, - rest, - rfc2109=False, - ): - - if version is not None: version = int(version) - if expires is not None: expires = int(expires) - if port is None and port_specified is True: - raise ValueError("if port is None, port_specified must be false") - - self.version = version - self.name = name - self.value = value - self.port = port - self.port_specified = port_specified - # normalise case, as per RFC 2965 section 3.3.3 - self.domain = domain.lower() - self.domain_specified = domain_specified - # Sigh. We need to know whether the domain given in the - # cookie-attribute had an initial dot, in order to follow RFC 2965 - # (as clarified in draft errata). Needed for the returned $Domain - # value. - self.domain_initial_dot = domain_initial_dot - self.path = path - self.path_specified = path_specified - self.secure = secure - self.expires = expires - self.discard = discard - self.comment = comment - self.comment_url = comment_url - self.rfc2109 = rfc2109 - - self._rest = copy.copy(rest) - - def has_nonstandard_attr(self, name): - return name in self._rest - def get_nonstandard_attr(self, name, default=None): - return self._rest.get(name, default) - def set_nonstandard_attr(self, name, value): - self._rest[name] = value - - def is_expired(self, now=None): - if now is None: now = time.time() - if (self.expires is not None) and (self.expires <= now): - return True - return False - - def __str__(self): - if self.port is None: p = "" - else: p = ":"+self.port - limit = self.domain + p + self.path - if self.value is not None: - namevalue = "%s=%s" % (self.name, self.value) - else: - namevalue = self.name - return "<Cookie %s for %s>" % (namevalue, limit) - - @as_native_str() - def __repr__(self): - args = [] - for name in ("version", "name", "value", - "port", "port_specified", - "domain", "domain_specified", "domain_initial_dot", - "path", "path_specified", - "secure", "expires", "discard", "comment", "comment_url", - ): - attr = getattr(self, name) - ### Python-Future: - # Avoid u'...' prefixes for unicode strings: - if isinstance(attr, str): - attr = str(attr) - ### - args.append(str("%s=%s") % (name, repr(attr))) - args.append("rest=%s" % repr(self._rest)) - args.append("rfc2109=%s" % repr(self.rfc2109)) - return "Cookie(%s)" % ", ".join(args) - - -class CookiePolicy(object): - """Defines which cookies get accepted from and returned to server. - - May also modify cookies, though this is probably a bad idea. - - The subclass DefaultCookiePolicy defines the standard rules for Netscape - and RFC 2965 cookies -- override that if you want a customised policy. - - """ - def set_ok(self, cookie, request): - """Return true if (and only if) cookie should be accepted from server. - - Currently, pre-expired cookies never get this far -- the CookieJar - class deletes such cookies itself.
- - """ - raise NotImplementedError() - - def return_ok(self, cookie, request): - """Return true if (and only if) cookie should be returned to server.""" - raise NotImplementedError() - - def domain_return_ok(self, domain, request): - """Return false if cookies should not be returned, given cookie domain. - """ - return True - - def path_return_ok(self, path, request): - """Return false if cookies should not be returned, given cookie path. - """ - return True - - -class DefaultCookiePolicy(CookiePolicy): - """Implements the standard rules for accepting and returning cookies.""" - - DomainStrictNoDots = 1 - DomainStrictNonDomain = 2 - DomainRFC2965Match = 4 - - DomainLiberal = 0 - DomainStrict = DomainStrictNoDots|DomainStrictNonDomain - - def __init__(self, - blocked_domains=None, allowed_domains=None, - netscape=True, rfc2965=False, - rfc2109_as_netscape=None, - hide_cookie2=False, - strict_domain=False, - strict_rfc2965_unverifiable=True, - strict_ns_unverifiable=False, - strict_ns_domain=DomainLiberal, - strict_ns_set_initial_dollar=False, - strict_ns_set_path=False, - ): - """Constructor arguments should be passed as keyword arguments only.""" - self.netscape = netscape - self.rfc2965 = rfc2965 - self.rfc2109_as_netscape = rfc2109_as_netscape - self.hide_cookie2 = hide_cookie2 - self.strict_domain = strict_domain - self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable - self.strict_ns_unverifiable = strict_ns_unverifiable - self.strict_ns_domain = strict_ns_domain - self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar - self.strict_ns_set_path = strict_ns_set_path - - if blocked_domains is not None: - self._blocked_domains = tuple(blocked_domains) - else: - self._blocked_domains = () - - if allowed_domains is not None: - allowed_domains = tuple(allowed_domains) - self._allowed_domains = allowed_domains - - def blocked_domains(self): - """Return the sequence of blocked domains (as a tuple).""" - return self._blocked_domains - def set_blocked_domains(self, blocked_domains): - """Set the sequence of blocked domains.""" - self._blocked_domains = tuple(blocked_domains) - - def is_blocked(self, domain): - for blocked_domain in self._blocked_domains: - if user_domain_match(domain, blocked_domain): - return True - return False - - def allowed_domains(self): - """Return None, or the sequence of allowed domains (as a tuple).""" - return self._allowed_domains - def set_allowed_domains(self, allowed_domains): - """Set the sequence of allowed domains, or None.""" - if allowed_domains is not None: - allowed_domains = tuple(allowed_domains) - self._allowed_domains = allowed_domains - - def is_not_allowed(self, domain): - if self._allowed_domains is None: - return False - for allowed_domain in self._allowed_domains: - if user_domain_match(domain, allowed_domain): - return False - return True - - def set_ok(self, cookie, request): - """ - If you override .set_ok(), be sure to call this method. If it returns - false, so should your subclass (assuming your subclass wants to be more - strict about which cookies to accept). 
- - """ - _debug(" - checking cookie %s=%s", cookie.name, cookie.value) - - assert cookie.name is not None - - for n in "version", "verifiability", "name", "path", "domain", "port": - fn_name = "set_ok_"+n - fn = getattr(self, fn_name) - if not fn(cookie, request): - return False - - return True - - def set_ok_version(self, cookie, request): - if cookie.version is None: - # Version is always set to 0 by parse_ns_headers if it's a Netscape - # cookie, so this must be an invalid RFC 2965 cookie. - _debug(" Set-Cookie2 without version attribute (%s=%s)", - cookie.name, cookie.value) - return False - if cookie.version > 0 and not self.rfc2965: - _debug(" RFC 2965 cookies are switched off") - return False - elif cookie.version == 0 and not self.netscape: - _debug(" Netscape cookies are switched off") - return False - return True - - def set_ok_verifiability(self, cookie, request): - if request.unverifiable and is_third_party(request): - if cookie.version > 0 and self.strict_rfc2965_unverifiable: - _debug(" third-party RFC 2965 cookie during " - "unverifiable transaction") - return False - elif cookie.version == 0 and self.strict_ns_unverifiable: - _debug(" third-party Netscape cookie during " - "unverifiable transaction") - return False - return True - - def set_ok_name(self, cookie, request): - # Try and stop servers setting V0 cookies designed to hack other - # servers that know both V0 and V1 protocols. - if (cookie.version == 0 and self.strict_ns_set_initial_dollar and - cookie.name.startswith("$")): - _debug(" illegal name (starts with '$'): '%s'", cookie.name) - return False - return True - - def set_ok_path(self, cookie, request): - if cookie.path_specified: - req_path = request_path(request) - if ((cookie.version > 0 or - (cookie.version == 0 and self.strict_ns_set_path)) and - not req_path.startswith(cookie.path)): - _debug(" path attribute %s is not a prefix of request " - "path %s", cookie.path, req_path) - return False - return True - - def set_ok_domain(self, cookie, request): - if self.is_blocked(cookie.domain): - _debug(" domain %s is in user block-list", cookie.domain) - return False - if self.is_not_allowed(cookie.domain): - _debug(" domain %s is not in user allow-list", cookie.domain) - return False - if cookie.domain_specified: - req_host, erhn = eff_request_host(request) - domain = cookie.domain - if self.strict_domain and (domain.count(".") >= 2): - # XXX This should probably be compared with the Konqueror - # (kcookiejar.cpp) and Mozilla implementations, but it's a - # losing battle. 
- i = domain.rfind(".") - j = domain.rfind(".", 0, i) - if j == 0: # domain like .foo.bar - tld = domain[i+1:] - sld = domain[j+1:i] - if sld.lower() in ("co", "ac", "com", "edu", "org", "net", - "gov", "mil", "int", "aero", "biz", "cat", "coop", - "info", "jobs", "mobi", "museum", "name", "pro", - "travel", "eu") and len(tld) == 2: - # domain like .co.uk - _debug(" country-code second level domain %s", domain) - return False - if domain.startswith("."): - undotted_domain = domain[1:] - else: - undotted_domain = domain - embedded_dots = (undotted_domain.find(".") >= 0) - if not embedded_dots and domain != ".local": - _debug(" non-local domain %s contains no embedded dot", - domain) - return False - if cookie.version == 0: - if (not erhn.endswith(domain) and - (not erhn.startswith(".") and - not ("."+erhn).endswith(domain))): - _debug(" effective request-host %s (even with added " - "initial dot) does not end with %s", - erhn, domain) - return False - if (cookie.version > 0 or - (self.strict_ns_domain & self.DomainRFC2965Match)): - if not domain_match(erhn, domain): - _debug(" effective request-host %s does not domain-match " - "%s", erhn, domain) - return False - if (cookie.version > 0 or - (self.strict_ns_domain & self.DomainStrictNoDots)): - host_prefix = req_host[:-len(domain)] - if (host_prefix.find(".") >= 0 and - not IPV4_RE.search(req_host)): - _debug(" host prefix %s for domain %s contains a dot", - host_prefix, domain) - return False - return True - - def set_ok_port(self, cookie, request): - if cookie.port_specified: - req_port = request_port(request) - if req_port is None: - req_port = "80" - else: - req_port = str(req_port) - for p in cookie.port.split(","): - try: - int(p) - except ValueError: - _debug(" bad port %s (not numeric)", p) - return False - if p == req_port: - break - else: - _debug(" request port (%s) not found in %s", - req_port, cookie.port) - return False - return True - - def return_ok(self, cookie, request): - """ - If you override .return_ok(), be sure to call this method. If it - returns false, so should your subclass (assuming your subclass wants to - be more strict about which cookies to return). - - """ - # Path has already been checked by .path_return_ok(), and domain - # blocking done by .domain_return_ok(). 
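# A simplified sketch of the comma-separated port check set_ok_port
# applies above: the cookie's "Port" list must contain the request port.
# `port_matches` is a hypothetical name; numeric validation is omitted.
def port_matches(cookie_port_attr, request_port):
    return str(request_port) in [p.strip() for p in cookie_port_attr.split(",")]

assert port_matches("80,8080", 8080)
assert not port_matches("80,8080", 443)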
- _debug(" - checking cookie %s=%s", cookie.name, cookie.value) - - for n in "version", "verifiability", "secure", "expires", "port", "domain": - fn_name = "return_ok_"+n - fn = getattr(self, fn_name) - if not fn(cookie, request): - return False - return True - - def return_ok_version(self, cookie, request): - if cookie.version > 0 and not self.rfc2965: - _debug(" RFC 2965 cookies are switched off") - return False - elif cookie.version == 0 and not self.netscape: - _debug(" Netscape cookies are switched off") - return False - return True - - def return_ok_verifiability(self, cookie, request): - if request.unverifiable and is_third_party(request): - if cookie.version > 0 and self.strict_rfc2965_unverifiable: - _debug(" third-party RFC 2965 cookie during unverifiable " - "transaction") - return False - elif cookie.version == 0 and self.strict_ns_unverifiable: - _debug(" third-party Netscape cookie during unverifiable " - "transaction") - return False - return True - - def return_ok_secure(self, cookie, request): - if cookie.secure and request.type != "https": - _debug(" secure cookie with non-secure request") - return False - return True - - def return_ok_expires(self, cookie, request): - if cookie.is_expired(self._now): - _debug(" cookie expired") - return False - return True - - def return_ok_port(self, cookie, request): - if cookie.port: - req_port = request_port(request) - if req_port is None: - req_port = "80" - for p in cookie.port.split(","): - if p == req_port: - break - else: - _debug(" request port %s does not match cookie port %s", - req_port, cookie.port) - return False - return True - - def return_ok_domain(self, cookie, request): - req_host, erhn = eff_request_host(request) - domain = cookie.domain - - # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't - if (cookie.version == 0 and - (self.strict_ns_domain & self.DomainStrictNonDomain) and - not cookie.domain_specified and domain != erhn): - _debug(" cookie with unspecified domain does not string-compare " - "equal to request domain") - return False - - if cookie.version > 0 and not domain_match(erhn, domain): - _debug(" effective request-host name %s does not domain-match " - "RFC 2965 cookie domain %s", erhn, domain) - return False - if cookie.version == 0 and not ("."+erhn).endswith(domain): - _debug(" request-host %s does not match Netscape cookie domain " - "%s", req_host, domain) - return False - return True - - def domain_return_ok(self, domain, request): - # Liberal check of. This is here as an optimization to avoid - # having to load lots of MSIE cookie files unless necessary. 
- req_host, erhn = eff_request_host(request) - if not req_host.startswith("."): - req_host = "."+req_host - if not erhn.startswith("."): - erhn = "."+erhn - if not (req_host.endswith(domain) or erhn.endswith(domain)): - #_debug(" request domain %s does not match cookie domain %s", - # req_host, domain) - return False - - if self.is_blocked(domain): - _debug(" domain %s is in user block-list", domain) - return False - if self.is_not_allowed(domain): - _debug(" domain %s is not in user allow-list", domain) - return False - - return True - - def path_return_ok(self, path, request): - _debug("- checking cookie path=%s", path) - req_path = request_path(request) - if not req_path.startswith(path): - _debug(" %s does not path-match %s", req_path, path) - return False - return True - - -def vals_sorted_by_key(adict): - keys = sorted(adict.keys()) - return map(adict.get, keys) - -def deepvalues(mapping): - """Iterates over nested mapping, depth-first, in sorted order by key.""" - values = vals_sorted_by_key(mapping) - for obj in values: - mapping = False - try: - obj.items - except AttributeError: - pass - else: - mapping = True - for subobj in deepvalues(obj): - yield subobj - if not mapping: - yield obj - - -# Used as second parameter to dict.get() method, to distinguish absent -# dict key from one with a None value. -class Absent(object): pass - -class CookieJar(object): - """Collection of HTTP cookies. - - You may not need to know about this class: try - urllib.request.build_opener(HTTPCookieProcessor).open(url). - """ - - non_word_re = re.compile(r"\W") - quote_re = re.compile(r"([\"\\])") - strict_domain_re = re.compile(r"\.?[^.]*") - domain_re = re.compile(r"[^.]*") - dots_re = re.compile(r"^\.+") - - magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII) - - def __init__(self, policy=None): - if policy is None: - policy = DefaultCookiePolicy() - self._policy = policy - - self._cookies_lock = _threading.RLock() - self._cookies = {} - - def set_policy(self, policy): - self._policy = policy - - def _cookies_for_domain(self, domain, request): - cookies = [] - if not self._policy.domain_return_ok(domain, request): - return [] - _debug("Checking %s for cookies to return", domain) - cookies_by_path = self._cookies[domain] - for path in cookies_by_path.keys(): - if not self._policy.path_return_ok(path, request): - continue - cookies_by_name = cookies_by_path[path] - for cookie in cookies_by_name.values(): - if not self._policy.return_ok(cookie, request): - _debug(" not returning cookie") - continue - _debug(" it's a match") - cookies.append(cookie) - return cookies - - def _cookies_for_request(self, request): - """Return a list of cookies to be returned to server.""" - cookies = [] - for domain in self._cookies.keys(): - cookies.extend(self._cookies_for_domain(domain, request)) - return cookies - - def _cookie_attrs(self, cookies): - """Return a list of cookie-attributes to be returned to server. - - like ['foo="bar"; $Path="/"', ...] - - The $Version attribute is also added when appropriate (currently only - once per request). - - """ - # add cookies in order of most specific (ie. longest) path first - cookies.sort(key=lambda a: len(a.path), reverse=True) - - version_set = False - - attrs = [] - for cookie in cookies: - # set version of Cookie header - # XXX - # What should it be if multiple matching Set-Cookie headers have - # different versions themselves? - # Answer: there is no answer; was supposed to be settled by - # RFC 2965 errata, but that may never appear... 
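# A minimal sketch of the three-level store the lookups above walk:
# self._cookies[domain][path][name] -> Cookie, shown with plain dicts.
jar = {}

def store(jar, domain, path, name, cookie):
    # Same nesting CookieJar maintains: create each level on demand.
    jar.setdefault(domain, {}).setdefault(path, {})[name] = cookie

store(jar, ".example.com", "/", "sid", "cookie-object")
assert jar[".example.com"]["/"]["sid"] == "cookie-object"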
- version = cookie.version - if not version_set: - version_set = True - if version > 0: - attrs.append("$Version=%s" % version) - - # quote cookie value if necessary - # (not for Netscape protocol, which already has any quotes - # intact, due to the poorly-specified Netscape Cookie: syntax) - if ((cookie.value is not None) and - self.non_word_re.search(cookie.value) and version > 0): - value = self.quote_re.sub(r"\\\1", cookie.value) - else: - value = cookie.value - - # add cookie-attributes to be returned in Cookie header - if cookie.value is None: - attrs.append(cookie.name) - else: - attrs.append("%s=%s" % (cookie.name, value)) - if version > 0: - if cookie.path_specified: - attrs.append('$Path="%s"' % cookie.path) - if cookie.domain.startswith("."): - domain = cookie.domain - if (not cookie.domain_initial_dot and - domain.startswith(".")): - domain = domain[1:] - attrs.append('$Domain="%s"' % domain) - if cookie.port is not None: - p = "$Port" - if cookie.port_specified: - p = p + ('="%s"' % cookie.port) - attrs.append(p) - - return attrs - - def add_cookie_header(self, request): - """Add correct Cookie: header to request (urllib.request.Request object). - - The Cookie2 header is also added unless policy.hide_cookie2 is true. - - """ - _debug("add_cookie_header") - self._cookies_lock.acquire() - try: - - self._policy._now = self._now = int(time.time()) - - cookies = self._cookies_for_request(request) - - attrs = self._cookie_attrs(cookies) - if attrs: - if not request.has_header("Cookie"): - request.add_unredirected_header( - "Cookie", "; ".join(attrs)) - - # if necessary, advertise that we know RFC 2965 - if (self._policy.rfc2965 and not self._policy.hide_cookie2 and - not request.has_header("Cookie2")): - for cookie in cookies: - if cookie.version != 1: - request.add_unredirected_header("Cookie2", '$Version="1"') - break - - finally: - self._cookies_lock.release() - - self.clear_expired_cookies() - - def _normalized_cookie_tuples(self, attrs_set): - """Return list of tuples containing normalised cookie information. - - attrs_set is the list of lists of key,value pairs extracted from - the Set-Cookie or Set-Cookie2 headers. - - Tuples are name, value, standard, rest, where name and value are the - cookie name and value, standard is a dictionary containing the standard - cookie-attributes (discard, secure, version, expires or max-age, - domain, path and port) and rest is a dictionary containing the rest of - the cookie-attributes. - - """ - cookie_tuples = [] - - boolean_attrs = "discard", "secure" - value_attrs = ("version", - "expires", "max-age", - "domain", "path", "port", - "comment", "commenturl") - - for cookie_attrs in attrs_set: - name, value = cookie_attrs[0] - - # Build dictionary of standard cookie-attributes (standard) and - # dictionary of other cookie-attributes (rest). - - # Note: expiry time is normalised to seconds since epoch. V0 - # cookies should have the Expires cookie-attribute, and V1 cookies - # should have Max-Age, but since V1 includes RFC 2109 cookies (and - # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we - # accept either (but prefer Max-Age). 
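# A worked example of the ordering rule _cookie_attrs establishes above:
# most specific (longest) path first when building the Cookie header.
cookies = [{"name": "a", "path": "/"}, {"name": "b", "path": "/docs/x"}]
cookies.sort(key=lambda c: len(c["path"]), reverse=True)
assert [c["name"] for c in cookies] == ["b", "a"]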
- max_age_set = False - - bad_cookie = False - - standard = {} - rest = {} - for k, v in cookie_attrs[1:]: - lc = k.lower() - # don't lose case distinction for unknown fields - if lc in value_attrs or lc in boolean_attrs: - k = lc - if k in boolean_attrs and v is None: - # boolean cookie-attribute is present, but has no value - # (like "discard", rather than "port=80") - v = True - if k in standard: - # only first value is significant - continue - if k == "domain": - if v is None: - _debug(" missing value for domain attribute") - bad_cookie = True - break - # RFC 2965 section 3.3.3 - v = v.lower() - if k == "expires": - if max_age_set: - # Prefer max-age to expires (like Mozilla) - continue - if v is None: - _debug(" missing or invalid value for expires " - "attribute: treating as session cookie") - continue - if k == "max-age": - max_age_set = True - try: - v = int(v) - except ValueError: - _debug(" missing or invalid (non-numeric) value for " - "max-age attribute") - bad_cookie = True - break - # convert RFC 2965 Max-Age to seconds since epoch - # XXX Strictly you're supposed to follow RFC 2616 - # age-calculation rules. Remember that zero Max-Age is a - # is a request to discard (old and new) cookie, though. - k = "expires" - v = self._now + v - if (k in value_attrs) or (k in boolean_attrs): - if (v is None and - k not in ("port", "comment", "commenturl")): - _debug(" missing value for %s attribute" % k) - bad_cookie = True - break - standard[k] = v - else: - rest[k] = v - - if bad_cookie: - continue - - cookie_tuples.append((name, value, standard, rest)) - - return cookie_tuples - - def _cookie_from_cookie_tuple(self, tup, request): - # standard is dict of standard cookie-attributes, rest is dict of the - # rest of them - name, value, standard, rest = tup - - domain = standard.get("domain", Absent) - path = standard.get("path", Absent) - port = standard.get("port", Absent) - expires = standard.get("expires", Absent) - - # set the easy defaults - version = standard.get("version", None) - if version is not None: - try: - version = int(version) - except ValueError: - return None # invalid version, ignore cookie - secure = standard.get("secure", False) - # (discard is also set if expires is Absent) - discard = standard.get("discard", False) - comment = standard.get("comment", None) - comment_url = standard.get("commenturl", None) - - # set default path - if path is not Absent and path != "": - path_specified = True - path = escape_path(path) - else: - path_specified = False - path = request_path(request) - i = path.rfind("/") - if i != -1: - if version == 0: - # Netscape spec parts company from reality here - path = path[:i] - else: - path = path[:i+1] - if len(path) == 0: path = "/" - - # set default domain - domain_specified = domain is not Absent - # but first we have to remember whether it starts with a dot - domain_initial_dot = False - if domain_specified: - domain_initial_dot = bool(domain.startswith(".")) - if domain is Absent: - req_host, erhn = eff_request_host(request) - domain = erhn - elif not domain.startswith("."): - domain = "."+domain - - # set default port - port_specified = False - if port is not Absent: - if port is None: - # Port attr present, but has no value: default to request port. - # Cookie should then only be sent back on that port. - port = request_port(request) - else: - port_specified = True - port = re.sub(r"\s+", "", port) - else: - # No port attr present. Cookie can be sent back on any port. 
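# A minimal sketch of the Max-Age normalisation above: a relative max-age
# becomes an absolute "expires" timestamp keyed to the current moment, so
# a zero Max-Age means "discard this cookie immediately".
# `maxage_to_expires` is a hypothetical helper name.
import time

def maxage_to_expires(max_age, now=None):
    now = int(time.time()) if now is None else now
    return now + int(max_age)

assert maxage_to_expires(3600, now=1000000) == 1003600
assert maxage_to_expires(0, now=1000000) <= 1000000  # already expired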
- port = None - - # set default expires and discard - if expires is Absent: - expires = None - discard = True - elif expires <= self._now: - # Expiry date in past is request to delete cookie. This can't be - # in DefaultCookiePolicy, because can't delete cookies there. - try: - self.clear(domain, path, name) - except KeyError: - pass - _debug("Expiring cookie, domain='%s', path='%s', name='%s'", - domain, path, name) - return None - - return Cookie(version, - name, value, - port, port_specified, - domain, domain_specified, domain_initial_dot, - path, path_specified, - secure, - expires, - discard, - comment, - comment_url, - rest) - - def _cookies_from_attrs_set(self, attrs_set, request): - cookie_tuples = self._normalized_cookie_tuples(attrs_set) - - cookies = [] - for tup in cookie_tuples: - cookie = self._cookie_from_cookie_tuple(tup, request) - if cookie: cookies.append(cookie) - return cookies - - def _process_rfc2109_cookies(self, cookies): - rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None) - if rfc2109_as_ns is None: - rfc2109_as_ns = not self._policy.rfc2965 - for cookie in cookies: - if cookie.version == 1: - cookie.rfc2109 = True - if rfc2109_as_ns: - # treat 2109 cookies as Netscape cookies rather than - # as RFC2965 cookies - cookie.version = 0 - - def make_cookies(self, response, request): - """Return sequence of Cookie objects extracted from response object.""" - # get cookie-attributes for RFC 2965 and Netscape protocols - headers = response.info() - rfc2965_hdrs = headers.get_all("Set-Cookie2", []) - ns_hdrs = headers.get_all("Set-Cookie", []) - - rfc2965 = self._policy.rfc2965 - netscape = self._policy.netscape - - if ((not rfc2965_hdrs and not ns_hdrs) or - (not ns_hdrs and not rfc2965) or - (not rfc2965_hdrs and not netscape) or - (not netscape and not rfc2965)): - return [] # no relevant cookie headers: quick exit - - try: - cookies = self._cookies_from_attrs_set( - split_header_words(rfc2965_hdrs), request) - except Exception: - _warn_unhandled_exception() - cookies = [] - - if ns_hdrs and netscape: - try: - # RFC 2109 and Netscape cookies - ns_cookies = self._cookies_from_attrs_set( - parse_ns_headers(ns_hdrs), request) - except Exception: - _warn_unhandled_exception() - ns_cookies = [] - self._process_rfc2109_cookies(ns_cookies) - - # Look for Netscape cookies (from Set-Cookie headers) that match - # corresponding RFC 2965 cookies (from Set-Cookie2 headers). - # For each match, keep the RFC 2965 cookie and ignore the Netscape - # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are - # bundled in with the Netscape cookies for this purpose, which is - # reasonable behaviour. 
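# (Illustrative aside, editorial: the precedence rule described above is a
# simple key lookup, e.g. with placeholder data
#     lookup = {("example.com", "/", "sid"): None}   # from Set-Cookie2
#     key = ("example.com", "/", "sid")              # from Set-Cookie
#     key not in lookup                              # -> False: 2965 wins
# so the matching Netscape cookie is filtered out below.)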
- if rfc2965: - lookup = {} - for cookie in cookies: - lookup[(cookie.domain, cookie.path, cookie.name)] = None - - def no_matching_rfc2965(ns_cookie, lookup=lookup): - key = ns_cookie.domain, ns_cookie.path, ns_cookie.name - return key not in lookup - ns_cookies = filter(no_matching_rfc2965, ns_cookies) - - if ns_cookies: - cookies.extend(ns_cookies) - - return cookies - - def set_cookie_if_ok(self, cookie, request): - """Set a cookie if policy says it's OK to do so.""" - self._cookies_lock.acquire() - try: - self._policy._now = self._now = int(time.time()) - - if self._policy.set_ok(cookie, request): - self.set_cookie(cookie) - - - finally: - self._cookies_lock.release() - - def set_cookie(self, cookie): - """Set a cookie, without checking whether or not it should be set.""" - c = self._cookies - self._cookies_lock.acquire() - try: - if cookie.domain not in c: c[cookie.domain] = {} - c2 = c[cookie.domain] - if cookie.path not in c2: c2[cookie.path] = {} - c3 = c2[cookie.path] - c3[cookie.name] = cookie - finally: - self._cookies_lock.release() - - def extract_cookies(self, response, request): - """Extract cookies from response, where allowable given the request.""" - _debug("extract_cookies: %s", response.info()) - self._cookies_lock.acquire() - try: - self._policy._now = self._now = int(time.time()) - - for cookie in self.make_cookies(response, request): - if self._policy.set_ok(cookie, request): - _debug(" setting cookie: %s", cookie) - self.set_cookie(cookie) - finally: - self._cookies_lock.release() - - def clear(self, domain=None, path=None, name=None): - """Clear some cookies. - - Invoking this method without arguments will clear all cookies. If - given a single argument, only cookies belonging to that domain will be - removed. If given two arguments, cookies belonging to the specified - path within that domain are removed. If given three arguments, then - the cookie with the specified name, path and domain is removed. - - Raises KeyError if no matching cookie exists. - - """ - if name is not None: - if (domain is None) or (path is None): - raise ValueError( - "domain and path must be given to remove a cookie by name") - del self._cookies[domain][path][name] - elif path is not None: - if domain is None: - raise ValueError( - "domain must be given to remove cookies by path") - del self._cookies[domain][path] - elif domain is not None: - del self._cookies[domain] - else: - self._cookies = {} - - def clear_session_cookies(self): - """Discard all session cookies. - - Note that the .save() method won't save session cookies anyway, unless - you ask otherwise by passing a true ignore_discard argument. - - """ - self._cookies_lock.acquire() - try: - for cookie in self: - if cookie.discard: - self.clear(cookie.domain, cookie.path, cookie.name) - finally: - self._cookies_lock.release() - - def clear_expired_cookies(self): - """Discard all expired cookies. - - You probably don't need to call this method: expired cookies are never - sent back to the server (provided you're using DefaultCookiePolicy), - this method is called by CookieJar itself every so often, and the - .save() method won't save expired cookies anyway (unless you ask - otherwise by passing a true ignore_expires argument). 
- - """ - self._cookies_lock.acquire() - try: - now = time.time() - for cookie in self: - if cookie.is_expired(now): - self.clear(cookie.domain, cookie.path, cookie.name) - finally: - self._cookies_lock.release() - - def __iter__(self): - return deepvalues(self._cookies) - - def __len__(self): - """Return number of contained cookies.""" - i = 0 - for cookie in self: i = i + 1 - return i - - @as_native_str() - def __repr__(self): - r = [] - for cookie in self: r.append(repr(cookie)) - return "<%s[%s]>" % (self.__class__, ", ".join(r)) - - def __str__(self): - r = [] - for cookie in self: r.append(str(cookie)) - return "<%s[%s]>" % (self.__class__, ", ".join(r)) - - -# derives from IOError for backwards-compatibility with Python 2.4.0 -class LoadError(IOError): pass - -class FileCookieJar(CookieJar): - """CookieJar that can be loaded from and saved to a file.""" - - def __init__(self, filename=None, delayload=False, policy=None): - """ - Cookies are NOT loaded from the named file until either the .load() or - .revert() method is called. - - """ - CookieJar.__init__(self, policy) - if filename is not None: - try: - filename+"" - except: - raise ValueError("filename must be string-like") - self.filename = filename - self.delayload = bool(delayload) - - def save(self, filename=None, ignore_discard=False, ignore_expires=False): - """Save cookies to a file.""" - raise NotImplementedError() - - def load(self, filename=None, ignore_discard=False, ignore_expires=False): - """Load cookies from a file.""" - if filename is None: - if self.filename is not None: filename = self.filename - else: raise ValueError(MISSING_FILENAME_TEXT) - - f = open(filename) - try: - self._really_load(f, filename, ignore_discard, ignore_expires) - finally: - f.close() - - def revert(self, filename=None, - ignore_discard=False, ignore_expires=False): - """Clear all cookies and reload cookies from a saved file. - - Raises LoadError (or IOError) if reversion is not successful; the - object's state will not be altered if this happens. - - """ - if filename is None: - if self.filename is not None: filename = self.filename - else: raise ValueError(MISSING_FILENAME_TEXT) - - self._cookies_lock.acquire() - try: - - old_state = copy.deepcopy(self._cookies) - self._cookies = {} - try: - self.load(filename, ignore_discard, ignore_expires) - except (LoadError, IOError): - self._cookies = old_state - raise - - finally: - self._cookies_lock.release() - - -def lwp_cookie_str(cookie): - """Return string representation of Cookie in an the LWP cookie file format. - - Actually, the format is extended a bit -- see module docstring. - - """ - h = [(cookie.name, cookie.value), - ("path", cookie.path), - ("domain", cookie.domain)] - if cookie.port is not None: h.append(("port", cookie.port)) - if cookie.path_specified: h.append(("path_spec", None)) - if cookie.port_specified: h.append(("port_spec", None)) - if cookie.domain_initial_dot: h.append(("domain_dot", None)) - if cookie.secure: h.append(("secure", None)) - if cookie.expires: h.append(("expires", - time2isoz(float(cookie.expires)))) - if cookie.discard: h.append(("discard", None)) - if cookie.comment: h.append(("comment", cookie.comment)) - if cookie.comment_url: h.append(("commenturl", cookie.comment_url)) - - keys = sorted(cookie._rest.keys()) - for k in keys: - h.append((k, str(cookie._rest[k]))) - - h.append(("version", str(cookie.version))) - - return join_header_words([h]) - -class LWPCookieJar(FileCookieJar): - """ - The LWPCookieJar saves a sequence of "Set-Cookie3" lines. 
- "Set-Cookie3" is the format used by the libwww-perl libary, not known - to be compatible with any browser, but which is easy to read and - doesn't lose information about RFC 2965 cookies. - - Additional methods - - as_lwp_str(ignore_discard=True, ignore_expired=True) - - """ - - def as_lwp_str(self, ignore_discard=True, ignore_expires=True): - """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers. - - ignore_discard and ignore_expires: see docstring for FileCookieJar.save - - """ - now = time.time() - r = [] - for cookie in self: - if not ignore_discard and cookie.discard: - continue - if not ignore_expires and cookie.is_expired(now): - continue - r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie)) - return "\n".join(r+[""]) - - def save(self, filename=None, ignore_discard=False, ignore_expires=False): - if filename is None: - if self.filename is not None: filename = self.filename - else: raise ValueError(MISSING_FILENAME_TEXT) - - f = open(filename, "w") - try: - # There really isn't an LWP Cookies 2.0 format, but this indicates - # that there is extra information in here (domain_dot and - # port_spec) while still being compatible with libwww-perl, I hope. - f.write("#LWP-Cookies-2.0\n") - f.write(self.as_lwp_str(ignore_discard, ignore_expires)) - finally: - f.close() - - def _really_load(self, f, filename, ignore_discard, ignore_expires): - magic = f.readline() - if not self.magic_re.search(magic): - msg = ("%r does not look like a Set-Cookie3 (LWP) format " - "file" % filename) - raise LoadError(msg) - - now = time.time() - - header = "Set-Cookie3:" - boolean_attrs = ("port_spec", "path_spec", "domain_dot", - "secure", "discard") - value_attrs = ("version", - "port", "path", "domain", - "expires", - "comment", "commenturl") - - try: - while 1: - line = f.readline() - if line == "": break - if not line.startswith(header): - continue - line = line[len(header):].strip() - - for data in split_header_words([line]): - name, value = data[0] - standard = {} - rest = {} - for k in boolean_attrs: - standard[k] = False - for k, v in data[1:]: - if k is not None: - lc = k.lower() - else: - lc = None - # don't lose case distinction for unknown fields - if (lc in value_attrs) or (lc in boolean_attrs): - k = lc - if k in boolean_attrs: - if v is None: v = True - standard[k] = v - elif k in value_attrs: - standard[k] = v - else: - rest[k] = v - - h = standard.get - expires = h("expires") - discard = h("discard") - if expires is not None: - expires = iso2time(expires) - if expires is None: - discard = True - domain = h("domain") - domain_specified = domain.startswith(".") - c = Cookie(h("version"), name, value, - h("port"), h("port_spec"), - domain, domain_specified, h("domain_dot"), - h("path"), h("path_spec"), - h("secure"), - expires, - discard, - h("comment"), - h("commenturl"), - rest) - if not ignore_discard and c.discard: - continue - if not ignore_expires and c.is_expired(now): - continue - self.set_cookie(c) - - except IOError: - raise - except Exception: - _warn_unhandled_exception() - raise LoadError("invalid Set-Cookie3 format file %r: %r" % - (filename, line)) - - -class MozillaCookieJar(FileCookieJar): - """ - - WARNING: you may want to backup your browser's cookies file if you use - this class to save cookies. I *think* it works, but there have been - bugs in the past! - - This class differs from CookieJar only in the format it uses to save and - load cookies to and from a file. This class uses the Mozilla/Netscape - `cookies.txt' format. 
lynx uses this file format, too. - - Don't expect cookies saved while the browser is running to be noticed by - the browser (in fact, Mozilla on unix will overwrite your saved cookies if - you change them on disk while it's running; on Windows, you probably can't - save at all while the browser is running). - - Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to - Netscape cookies on saving. - - In particular, the cookie version and port number information is lost, - together with information about whether or not Path, Port and Discard were - specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the - domain as set in the HTTP header started with a dot (yes, I'm aware some - domains in Netscape files start with a dot and some don't -- trust me, you - really don't want to know any more about this). - - Note that though Mozilla and Netscape use the same format, they use - slightly different headers. The class saves cookies using the Netscape - header by default (Mozilla can cope with that). - - """ - magic_re = re.compile("#( Netscape)? HTTP Cookie File") - header = """\ -# Netscape HTTP Cookie File -# http://www.netscape.com/newsref/std/cookie_spec.html -# This is a generated file! Do not edit. - -""" - - def _really_load(self, f, filename, ignore_discard, ignore_expires): - now = time.time() - - magic = f.readline() - if not self.magic_re.search(magic): - f.close() - raise LoadError( - "%r does not look like a Netscape format cookies file" % - filename) - - try: - while 1: - line = f.readline() - if line == "": break - - # last field may be absent, so keep any trailing tab - if line.endswith("\n"): line = line[:-1] - - # skip comments and blank lines XXX what is $ for? - if (line.strip().startswith(("#", "$")) or - line.strip() == ""): - continue - - domain, domain_specified, path, secure, expires, name, value = \ - line.split("\t") - secure = (secure == "TRUE") - domain_specified = (domain_specified == "TRUE") - if name == "": - # cookies.txt regards 'Set-Cookie: foo' as a cookie - # with no name, whereas http.cookiejar regards it as a - # cookie with no value. 
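# (Illustrative aside, editorial: a cookies.txt record is one
# tab-separated line carrying the seven fields unpacked above, e.g.
#     .example.com<TAB>TRUE<TAB>/<TAB>FALSE<TAB>2147483647<TAB>sid<TAB>abc123
# where the domain, the TRUE/FALSE flags, the expiry epoch and the
# name/value pair are all placeholder values.)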
- name = value - value = None - - initial_dot = domain.startswith(".") - assert domain_specified == initial_dot - - discard = False - if expires == "": - expires = None - discard = True - - # assume path_specified is false - c = Cookie(0, name, value, - None, False, - domain, domain_specified, initial_dot, - path, False, - secure, - expires, - discard, - None, - None, - {}) - if not ignore_discard and c.discard: - continue - if not ignore_expires and c.is_expired(now): - continue - self.set_cookie(c) - - except IOError: - raise - except Exception: - _warn_unhandled_exception() - raise LoadError("invalid Netscape format cookies file %r: %r" % - (filename, line)) - - def save(self, filename=None, ignore_discard=False, ignore_expires=False): - if filename is None: - if self.filename is not None: filename = self.filename - else: raise ValueError(MISSING_FILENAME_TEXT) - - f = open(filename, "w") - try: - f.write(self.header) - now = time.time() - for cookie in self: - if not ignore_discard and cookie.discard: - continue - if not ignore_expires and cookie.is_expired(now): - continue - if cookie.secure: secure = "TRUE" - else: secure = "FALSE" - if cookie.domain.startswith("."): initial_dot = "TRUE" - else: initial_dot = "FALSE" - if cookie.expires is not None: - expires = str(cookie.expires) - else: - expires = "" - if cookie.value is None: - # cookies.txt regards 'Set-Cookie: foo' as a cookie - # with no name, whereas http.cookiejar regards it as a - # cookie with no value. - name = "" - value = cookie.name - else: - name = cookie.name - value = cookie.value - f.write( - "\t".join([cookie.domain, initial_dot, cookie.path, - secure, expires, name, value])+ - "\n") - finally: - f.close() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookies.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookies.py deleted file mode 100755 index 8bb61e2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/cookies.py +++ /dev/null @@ -1,598 +0,0 @@ -#### -# Copyright 2000 by Timothy O'Malley -# -# All Rights Reserved -# -# Permission to use, copy, modify, and distribute this software -# and its documentation for any purpose and without fee is hereby -# granted, provided that the above copyright notice appear in all -# copies and that both that copyright notice and this permission -# notice appear in supporting documentation, and that the name of -# Timothy O'Malley not be used in advertising or publicity -# pertaining to distribution of the software without specific, written -# prior permission. -# -# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS -# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR -# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -# PERFORMANCE OF THIS SOFTWARE. -# -#### -# -# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp -# by Timothy O'Malley -# -# Cookie.py is a Python module for the handling of HTTP -# cookies as a Python dictionary. See RFC 2109 for more -# information on cookies. -# -# The original idea to treat Cookies as a dictionary came from -# Dave Mitchell (davem@magnet.com) in 1995, when he released the -# first version of nscookie.py. 
-# -#### - -r""" -http.cookies module ported to python-future from Py3.3 - -Here's a sample session to show how to use this module. -At the moment, this is the only documentation. - -The Basics ----------- - -Importing is easy... - - >>> from http import cookies - -Most of the time you start by creating a cookie. - - >>> C = cookies.SimpleCookie() - -Once you've created your Cookie, you can add values just as if it were -a dictionary. - - >>> C = cookies.SimpleCookie() - >>> C["fig"] = "newton" - >>> C["sugar"] = "wafer" - >>> C.output() - 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer' - -Notice that the printable representation of a Cookie is the -appropriate format for a Set-Cookie: header. This is the -default behavior. You can change the header and printed -attributes by using the .output() function - - >>> C = cookies.SimpleCookie() - >>> C["rocky"] = "road" - >>> C["rocky"]["path"] = "/cookie" - >>> print(C.output(header="Cookie:")) - Cookie: rocky=road; Path=/cookie - >>> print(C.output(attrs=[], header="Cookie:")) - Cookie: rocky=road - -The load() method of a Cookie extracts cookies from a string. In a -CGI script, you would use this method to extract the cookies from the -HTTP_COOKIE environment variable. - - >>> C = cookies.SimpleCookie() - >>> C.load("chips=ahoy; vienna=finger") - >>> C.output() - 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger' - -The load() method is darn-tootin smart about identifying cookies -within a string. Escaped quotation marks, nested semicolons, and other -such trickeries do not confuse it. - - >>> C = cookies.SimpleCookie() - >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";') - >>> print(C) - Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;" - -Each element of the Cookie also supports all of the RFC 2109 -Cookie attributes. Here's an example which sets the Path -attribute. - - >>> C = cookies.SimpleCookie() - >>> C["oreo"] = "doublestuff" - >>> C["oreo"]["path"] = "/" - >>> print(C) - Set-Cookie: oreo=doublestuff; Path=/ - -Each dictionary element has a 'value' attribute, which gives you -back the value associated with the key. - - >>> C = cookies.SimpleCookie() - >>> C["twix"] = "none for you" - >>> C["twix"].value - 'none for you' - -The SimpleCookie expects that all values should be standard strings. -Just to be sure, SimpleCookie invokes the str() builtin to convert -the value to a string, when the values are set dictionary-style. - - >>> C = cookies.SimpleCookie() - >>> C["number"] = 7 - >>> C["string"] = "seven" - >>> C["number"].value - '7' - >>> C["string"].value - 'seven' - >>> C.output() - 'Set-Cookie: number=7\r\nSet-Cookie: string=seven' - -Finis. -""" -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -from future.builtins import chr, dict, int, str -from future.utils import PY2, as_native_str - -# -# Import our required modules -# -import re -if PY2: - re.ASCII = 0 # for py2 compatibility -import string - -__all__ = ["CookieError", "BaseCookie", "SimpleCookie"] - -_nulljoin = ''.join -_semispacejoin = '; '.join -_spacejoin = ' '.join - -# -# Define an exception visible to External modules -# -class CookieError(Exception): - pass - - -# These quoting routines conform to the RFC2109 specification, which in -# turn references the character definitions from RFC2068. They provide -# a two-way quoting algorithm. 
Any non-text character is translated -# into a 4 character sequence: a forward-slash followed by the -# three-digit octal equivalent of the character. Any '\' or '"' is -# quoted with a preceeding '\' slash. -# -# These are taken from RFC2068 and RFC2109. -# _LegalChars is the list of chars which don't require "'s -# _Translator hash-table for fast quoting -# -_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:" -_Translator = { - '\000' : '\\000', '\001' : '\\001', '\002' : '\\002', - '\003' : '\\003', '\004' : '\\004', '\005' : '\\005', - '\006' : '\\006', '\007' : '\\007', '\010' : '\\010', - '\011' : '\\011', '\012' : '\\012', '\013' : '\\013', - '\014' : '\\014', '\015' : '\\015', '\016' : '\\016', - '\017' : '\\017', '\020' : '\\020', '\021' : '\\021', - '\022' : '\\022', '\023' : '\\023', '\024' : '\\024', - '\025' : '\\025', '\026' : '\\026', '\027' : '\\027', - '\030' : '\\030', '\031' : '\\031', '\032' : '\\032', - '\033' : '\\033', '\034' : '\\034', '\035' : '\\035', - '\036' : '\\036', '\037' : '\\037', - - # Because of the way browsers really handle cookies (as opposed - # to what the RFC says) we also encode , and ; - - ',' : '\\054', ';' : '\\073', - - '"' : '\\"', '\\' : '\\\\', - - '\177' : '\\177', '\200' : '\\200', '\201' : '\\201', - '\202' : '\\202', '\203' : '\\203', '\204' : '\\204', - '\205' : '\\205', '\206' : '\\206', '\207' : '\\207', - '\210' : '\\210', '\211' : '\\211', '\212' : '\\212', - '\213' : '\\213', '\214' : '\\214', '\215' : '\\215', - '\216' : '\\216', '\217' : '\\217', '\220' : '\\220', - '\221' : '\\221', '\222' : '\\222', '\223' : '\\223', - '\224' : '\\224', '\225' : '\\225', '\226' : '\\226', - '\227' : '\\227', '\230' : '\\230', '\231' : '\\231', - '\232' : '\\232', '\233' : '\\233', '\234' : '\\234', - '\235' : '\\235', '\236' : '\\236', '\237' : '\\237', - '\240' : '\\240', '\241' : '\\241', '\242' : '\\242', - '\243' : '\\243', '\244' : '\\244', '\245' : '\\245', - '\246' : '\\246', '\247' : '\\247', '\250' : '\\250', - '\251' : '\\251', '\252' : '\\252', '\253' : '\\253', - '\254' : '\\254', '\255' : '\\255', '\256' : '\\256', - '\257' : '\\257', '\260' : '\\260', '\261' : '\\261', - '\262' : '\\262', '\263' : '\\263', '\264' : '\\264', - '\265' : '\\265', '\266' : '\\266', '\267' : '\\267', - '\270' : '\\270', '\271' : '\\271', '\272' : '\\272', - '\273' : '\\273', '\274' : '\\274', '\275' : '\\275', - '\276' : '\\276', '\277' : '\\277', '\300' : '\\300', - '\301' : '\\301', '\302' : '\\302', '\303' : '\\303', - '\304' : '\\304', '\305' : '\\305', '\306' : '\\306', - '\307' : '\\307', '\310' : '\\310', '\311' : '\\311', - '\312' : '\\312', '\313' : '\\313', '\314' : '\\314', - '\315' : '\\315', '\316' : '\\316', '\317' : '\\317', - '\320' : '\\320', '\321' : '\\321', '\322' : '\\322', - '\323' : '\\323', '\324' : '\\324', '\325' : '\\325', - '\326' : '\\326', '\327' : '\\327', '\330' : '\\330', - '\331' : '\\331', '\332' : '\\332', '\333' : '\\333', - '\334' : '\\334', '\335' : '\\335', '\336' : '\\336', - '\337' : '\\337', '\340' : '\\340', '\341' : '\\341', - '\342' : '\\342', '\343' : '\\343', '\344' : '\\344', - '\345' : '\\345', '\346' : '\\346', '\347' : '\\347', - '\350' : '\\350', '\351' : '\\351', '\352' : '\\352', - '\353' : '\\353', '\354' : '\\354', '\355' : '\\355', - '\356' : '\\356', '\357' : '\\357', '\360' : '\\360', - '\361' : '\\361', '\362' : '\\362', '\363' : '\\363', - '\364' : '\\364', '\365' : '\\365', '\366' : '\\366', - '\367' : '\\367', '\370' : '\\370', '\371' : '\\371', - '\372' : '\\372', 
'\373' : '\\373', '\374' : '\\374', - '\375' : '\\375', '\376' : '\\376', '\377' : '\\377' - } - -def _quote(str, LegalChars=_LegalChars): - r"""Quote a string for use in a cookie header. - - If the string does not need to be double-quoted, then just return the - string. Otherwise, surround the string in doublequotes and quote - (with a \) special characters. - """ - if all(c in LegalChars for c in str): - return str - else: - return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"' - - -_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") -_QuotePatt = re.compile(r"[\\].") - -def _unquote(mystr): - # If there aren't any doublequotes, - # then there can't be any special characters. See RFC 2109. - if len(mystr) < 2: - return mystr - if mystr[0] != '"' or mystr[-1] != '"': - return mystr - - # We have to assume that we must decode this string. - # Down to work. - - # Remove the "s - mystr = mystr[1:-1] - - # Check for special sequences. Examples: - # \012 --> \n - # \" --> " - # - i = 0 - n = len(mystr) - res = [] - while 0 <= i < n: - o_match = _OctalPatt.search(mystr, i) - q_match = _QuotePatt.search(mystr, i) - if not o_match and not q_match: # Neither matched - res.append(mystr[i:]) - break - # else: - j = k = -1 - if o_match: - j = o_match.start(0) - if q_match: - k = q_match.start(0) - if q_match and (not o_match or k < j): # QuotePatt matched - res.append(mystr[i:k]) - res.append(mystr[k+1]) - i = k + 2 - else: # OctalPatt matched - res.append(mystr[i:j]) - res.append(chr(int(mystr[j+1:j+4], 8))) - i = j + 4 - return _nulljoin(res) - -# The _getdate() routine is used to set the expiration time in the cookie's HTTP -# header. By default, _getdate() returns the current time in the appropriate -# "expires" format for a Set-Cookie header. The one optional argument is an -# offset from now, in seconds. For example, an offset of -3600 means "one hour -# ago". The offset may be a floating point number. -# - -_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - -_monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - -def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname): - from time import gmtime, time - now = time() - year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) - return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ - (weekdayname[wd], day, monthname[month], year, hh, mm, ss) - - -class Morsel(dict): - """A class to hold ONE (key, value) pair. - - In a cookie, each such pair may have several attributes, so this class is - used to keep the attributes associated with the appropriate key,value pair. - This class also includes a coded_value attribute, which is used to hold - the network representation of the value. This is most useful when Python - objects are pickled for network transit. - """ - # RFC 2109 lists these attributes as reserved: - # path comment domain - # max-age secure version - # - # For historical reasons, these attributes are also reserved: - # expires - # - # This is an extension from Microsoft: - # httponly - # - # This dictionary provides a mapping from the lowercase - # variant on the left to the appropriate traditional - # formatting on the right. 
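# (Illustrative aside, editorial, assuming the equivalent Py3 stdlib
# http.cookies module: only keys from the _reserved table below may be
# set on a Morsel, e.g.
#     from http.cookies import SimpleCookie
#     C = SimpleCookie()
#     C["sid"] = "abc123"           # hypothetical cookie
#     C["sid"]["path"] = "/"        # "path" is reserved, so accepted
#     C.output()                    # 'Set-Cookie: sid=abc123; Path=/'
# while C["sid"]["colour"] = "blue" would raise CookieError.)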
-    _reserved = {
-        "expires" : "expires",
-        "path" : "Path",
-        "comment" : "Comment",
-        "domain" : "Domain",
-        "max-age" : "Max-Age",
-        "secure" : "secure",
-        "httponly" : "httponly",
-        "version" : "Version",
-    }
-
-    _flags = set(['secure', 'httponly'])
-
-    def __init__(self):
-        # Set defaults
-        self.key = self.value = self.coded_value = None
-
-        # Set default attributes
-        for key in self._reserved:
-            dict.__setitem__(self, key, "")
-
-    def __setitem__(self, K, V):
-        K = K.lower()
-        if not K in self._reserved:
-            raise CookieError("Invalid Attribute %s" % K)
-        dict.__setitem__(self, K, V)
-
-    def isReservedKey(self, K):
-        return K.lower() in self._reserved
-
-    def set(self, key, val, coded_val, LegalChars=_LegalChars):
-        # First we verify that the key isn't a reserved word
-        # Second we make sure it only contains legal characters
-        if key.lower() in self._reserved:
-            raise CookieError("Attempt to set a reserved key: %s" % key)
-        if any(c not in LegalChars for c in key):
-            raise CookieError("Illegal key value: %s" % key)
-
-        # It's a good key, so save it.
-        self.key = key
-        self.value = val
-        self.coded_value = coded_val
-
-    def output(self, attrs=None, header="Set-Cookie:"):
-        return "%s %s" % (header, self.OutputString(attrs))
-
-    __str__ = output
-
-    @as_native_str()
-    def __repr__(self):
-        if PY2 and isinstance(self.value, unicode):
-            val = str(self.value)   # make it a newstr to remove the u prefix
-        else:
-            val = self.value
-        return '<%s: %s=%s>' % (self.__class__.__name__,
-                                str(self.key), repr(val))
-
-    def js_output(self, attrs=None):
-        # Print javascript
-        return """
-        <script type="text/javascript">
-        <!-- begin hiding
-        document.cookie = \"%s\";
-        // end hiding -->
-        </script>
-        """ % (self.OutputString(attrs).replace('"', r'\"'))
-
-    def OutputString(self, attrs=None):
-        # Build up our result
-        #
-        result = []
-        append = result.append
-
-        # First, the key=value pair
-        append("%s=%s" % (self.key, self.coded_value))
-
-        # Now add any defined attributes
-        if attrs is None:
-            attrs = self._reserved
-        items = sorted(self.items())
-        for key, value in items:
-            if value == "":
-                continue
-            if key not in attrs:
-                continue
-            if key == "expires" and isinstance(value, int):
-                append("%s=%s" % (self._reserved[key], _getdate(value)))
-            elif key == "max-age" and isinstance(value, int):
-                append("%s=%d" % (self._reserved[key], value))
-            elif key == "secure":
-                append(str(self._reserved[key]))
-            elif key == "httponly":
-                append(str(self._reserved[key]))
-            else:
-                append("%s=%s" % (self._reserved[key], value))
-
-        # Return the result
-        return _semispacejoin(result)
-
-
-#
-# Pattern for finding cookie
-#
-# This used to be strict parsing based on the RFC2109 and RFC2068
-# specifications.  I have since discovered that MSIE 3.0x doesn't
-# follow the character rules outlined in those specs.  As a
-# result, the parsing rules here are less strict.
-#
-
-_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
-_CookiePattern = re.compile(r"""
-    (?x)                           # This is a verbose pattern
-    (?P<key>                       # Start of group 'key'
-    """ + _LegalCharsPatt + r"""+?   # Any word of at least one letter
-    )                              # End of group 'key'
-    (                              # Optional group: there may not be a value.
-    \s*=\s*                          # Equal Sign
-    (?P<val>                         # Start of group 'val'
-    "(?:[^\\"]|\\.)*"                  # Any doublequoted string
-    |                                  # or
-    \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT  # Special case for "expires" attr
-    |                                  # or
-    """ + _LegalCharsPatt + r"""*      # Any word or empty string
-    )                                # End of group 'val'
-    )?                             # End of optional value group
-    \s*                            # Any number of spaces.
-    (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
-    """, re.ASCII)                 # May be removed if safe.
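# (Illustrative aside, editorial, assuming the Py3 stdlib http.cookies
# keeps these private helpers: the octal quoting above round-trips, e.g.
#     from http.cookies import _quote, _unquote
#     _quote('a,b;c')               # -> '"a\\054b\\073c"'
#     _unquote('"a\\054b\\073c"')   # -> 'a,b;c'
# since "," and ";" are not in _LegalChars.)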
- - -# At long last, here is the cookie class. Using this class is almost just like -# using a dictionary. See this module's docstring for example usage. -# -class BaseCookie(dict): - """A container class for a set of Morsels.""" - - def value_decode(self, val): - """real_value, coded_value = value_decode(STRING) - Called prior to setting a cookie's value from the network - representation. The VALUE is the value read from HTTP - header. - Override this function to modify the behavior of cookies. - """ - return val, val - - def value_encode(self, val): - """real_value, coded_value = value_encode(VALUE) - Called prior to setting a cookie's value from the dictionary - representation. The VALUE is the value being assigned. - Override this function to modify the behavior of cookies. - """ - strval = str(val) - return strval, strval - - def __init__(self, input=None): - if input: - self.load(input) - - def __set(self, key, real_value, coded_value): - """Private method for setting a cookie's value""" - M = self.get(key, Morsel()) - M.set(key, real_value, coded_value) - dict.__setitem__(self, key, M) - - def __setitem__(self, key, value): - """Dictionary style assignment.""" - rval, cval = self.value_encode(value) - self.__set(key, rval, cval) - - def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): - """Return a string suitable for HTTP.""" - result = [] - items = sorted(self.items()) - for key, value in items: - result.append(value.output(attrs, header)) - return sep.join(result) - - __str__ = output - - @as_native_str() - def __repr__(self): - l = [] - items = sorted(self.items()) - for key, value in items: - if PY2 and isinstance(value.value, unicode): - val = str(value.value) # make it a newstr to remove the u prefix - else: - val = value.value - l.append('%s=%s' % (str(key), repr(val))) - return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l)) - - def js_output(self, attrs=None): - """Return a string suitable for JavaScript.""" - result = [] - items = sorted(self.items()) - for key, value in items: - result.append(value.js_output(attrs)) - return _nulljoin(result) - - def load(self, rawdata): - """Load cookies from a string (presumably HTTP_COOKIE) or - from a dictionary. Loading cookies from a dictionary 'd' - is equivalent to calling: - map(Cookie.__setitem__, d.keys(), d.values()) - """ - if isinstance(rawdata, str): - self.__parse_string(rawdata) - else: - # self.update() wouldn't call our custom __setitem__ - for key, value in rawdata.items(): - self[key] = value - return - - def __parse_string(self, mystr, patt=_CookiePattern): - i = 0 # Our starting point - n = len(mystr) # Length of string - M = None # current morsel - - while 0 <= i < n: - # Start looking for a cookie - match = patt.search(mystr, i) - if not match: - # No more cookies - break - - key, value = match.group("key"), match.group("val") - - i = match.end(0) - - # Parse the key, value in case it's metainfo - if key[0] == "$": - # We ignore attributes which pertain to the cookie - # mechanism as a whole. See RFC 2109. - # (Does anyone care?) - if M: - M[key[1:]] = value - elif key.lower() in Morsel._reserved: - if M: - if value is None: - if key.lower() in Morsel._flags: - M[key] = True - else: - M[key] = _unquote(value) - elif value is not None: - rval, cval = self.value_decode(value) - self.__set(key, rval, cval) - M = self[key] - - -class SimpleCookie(BaseCookie): - """ - SimpleCookie supports strings as cookie values. 
When setting - the value using the dictionary assignment notation, SimpleCookie - calls the builtin str() to convert the value to a string. Values - received from HTTP are kept as strings. - """ - def value_decode(self, val): - return _unquote(val), val - - def value_encode(self, val): - strval = str(val) - return strval, _quote(strval) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/server.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/server.py deleted file mode 100755 index b1c11e0..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/server.py +++ /dev/null @@ -1,1226 +0,0 @@ -"""HTTP server classes. - -From Python 3.3 - -Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see -SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, -and CGIHTTPRequestHandler for CGI scripts. - -It does, however, optionally implement HTTP/1.1 persistent connections, -as of version 0.3. - -Notes on CGIHTTPRequestHandler ------------------------------- - -This class implements GET and POST requests to cgi-bin scripts. - -If the os.fork() function is not present (e.g. on Windows), -subprocess.Popen() is used as a fallback, with slightly altered semantics. - -In all cases, the implementation is intentionally naive -- all -requests are executed synchronously. - -SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL --- it may execute arbitrary Python code or external programs. - -Note that status code 200 is sent prior to execution of a CGI script, so -scripts cannot send other status codes such as 302 (redirect). - -XXX To do: - -- log requests even later (to capture byte count) -- log user-agent header and other interesting goodies -- send error log to separate file -""" - -from __future__ import (absolute_import, division, - print_function, unicode_literals) -from future import utils -from future.builtins import * - - -# See also: -# -# HTTP Working Group T. Berners-Lee -# INTERNET-DRAFT R. T. Fielding -# H. Frystyk Nielsen -# Expires September 8, 1995 March 8, 1995 -# -# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt -# -# and -# -# Network Working Group R. Fielding -# Request for Comments: 2616 et al -# Obsoletes: 2068 June 1999 -# Category: Standards Track -# -# URL: http://www.faqs.org/rfcs/rfc2616.html - -# Log files -# --------- -# -# Here's a quote from the NCSA httpd docs about log file format. -# -# | The logfile format is as follows. Each line consists of: -# | -# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb -# | -# | host: Either the DNS name or the IP number of the remote client -# | rfc931: Any information returned by identd for this person, -# | - otherwise. -# | authuser: If user sent a userid for authentication, the user name, -# | - otherwise. -# | DD: Day -# | Mon: Month (calendar name) -# | YYYY: Year -# | hh: hour (24-hour format, the machine's timezone) -# | mm: minutes -# | ss: seconds -# | request: The first line of the HTTP request as sent by the client. -# | ddd: the status code returned by the server, - if not available. -# | bbbb: the total number of bytes sent, -# | *not including the HTTP/1.0 header*, - if not available -# | -# | You can determine the name of the file accessed through request. -# -# (Actually, the latter is only true if you know the server configuration -# at the time the request was made!) 
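# (Illustrative sketch, editorial, not part of the deleted file: the NCSA
# log line quoted above is roughly what log_message() emits, with
# placeholder host and request values.)
import time

host = "127.0.0.1"
requestline = "GET /index.html HTTP/1.0"
stamp = time.strftime("%d/%b/%Y %H:%M:%S", time.localtime())
print('%s - - [%s] "%s" %s %s' % (host, stamp, requestline, 200, "-"))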
-
-__version__ = "0.6"
-
-__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
-
-from future.backports import html
-from future.backports.http import client as http_client
-from future.backports.urllib import parse as urllib_parse
-from future.backports import socketserver
-
-import io
-import mimetypes
-import os
-import posixpath
-import select
-import shutil
-import socket # For gethostbyaddr()
-import sys
-import time
-import copy
-import argparse
-
-
-# Default error message template
-DEFAULT_ERROR_MESSAGE = """\
-<html>
-    <head>
-        <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
-        <title>Error response</title>
-    </head>
-    <body>
-        <h1>Error response</h1>
-        <p>Error code: %(code)d</p>
-        <p>Message: %(message)s.</p>
-        <p>Error code explanation: %(code)s - %(explain)s.</p>
- - -""" - -DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8" - -def _quote_html(html): - return html.replace("&", "&").replace("<", "<").replace(">", ">") - -class HTTPServer(socketserver.TCPServer): - - allow_reuse_address = 1 # Seems to make sense in testing environment - - def server_bind(self): - """Override server_bind to store the server name.""" - socketserver.TCPServer.server_bind(self) - host, port = self.socket.getsockname()[:2] - self.server_name = socket.getfqdn(host) - self.server_port = port - - -class BaseHTTPRequestHandler(socketserver.StreamRequestHandler): - - """HTTP request handler base class. - - The following explanation of HTTP serves to guide you through the - code as well as to expose any misunderstandings I may have about - HTTP (so you don't need to read the code to figure out I'm wrong - :-). - - HTTP (HyperText Transfer Protocol) is an extensible protocol on - top of a reliable stream transport (e.g. TCP/IP). The protocol - recognizes three parts to a request: - - 1. One line identifying the request type and path - 2. An optional set of RFC-822-style headers - 3. An optional data part - - The headers and data are separated by a blank line. - - The first line of the request has the form - - - - where is a (case-sensitive) keyword such as GET or POST, - is a string containing path information for the request, - and should be the string "HTTP/1.0" or "HTTP/1.1". - is encoded using the URL encoding scheme (using %xx to signify - the ASCII character with hex code xx). - - The specification specifies that lines are separated by CRLF but - for compatibility with the widest range of clients recommends - servers also handle LF. Similarly, whitespace in the request line - is treated sensibly (allowing multiple spaces between components - and allowing trailing whitespace). - - Similarly, for output, lines ought to be separated by CRLF pairs - but most clients grok LF characters just fine. - - If the first line of the request has the form - - - - (i.e. is left out) then this is assumed to be an HTTP - 0.9 request; this form has no optional headers and data part and - the reply consists of just the data. - - The reply form of the HTTP 1.x protocol again has three parts: - - 1. One line giving the response code - 2. An optional set of RFC-822-style headers - 3. The data - - Again, the headers and data are separated by a blank line. - - The response code line has the form - - - - where is the protocol version ("HTTP/1.0" or "HTTP/1.1"), - is a 3-digit response code indicating success or - failure of the request, and is an optional - human-readable string explaining what the response code means. - - This server parses the request and the headers, and then calls a - function specific to the request type (). Specifically, - a request SPAM will be handled by a method do_SPAM(). If no - such method exists the server sends an error response to the - client. If it exists, it is called with no arguments: - - do_SPAM() - - Note that the request name is case sensitive (i.e. SPAM and spam - are different requests). - - The various request details are stored in instance variables: - - - client_address is the client IP address in the form (host, - port); - - - command, path and version are the broken-down request line; - - - headers is an instance of email.message.Message (or a derived - class) containing the header information; - - - rfile is a file object open for reading positioned at the - start of the optional input data part; - - - wfile is a file object open for writing. 
-
-    # The Python system version, truncated to its first component.
-    sys_version = "Python/" + sys.version.split()[0]
-
-    # The server software version.  You may want to override this.
-    # The format is multiple whitespace-separated strings,
-    # where each string is of the form name[/version].
-    server_version = "BaseHTTP/" + __version__
-
-    error_message_format = DEFAULT_ERROR_MESSAGE
-    error_content_type = DEFAULT_ERROR_CONTENT_TYPE
-
-    # The default request version.  This only affects responses up until
-    # the point where the request line is parsed, so it mainly decides what
-    # the client gets back when sending a malformed request line.
-    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
-    default_request_version = "HTTP/0.9"
-
-    def parse_request(self):
-        """Parse a request (internal).
-
-        The request should be stored in self.raw_requestline; the results
-        are in self.command, self.path, self.request_version and
-        self.headers.
-
-        Return True for success, False for failure; on failure, an
-        error is sent back.
-
-        """
-        self.command = None  # set in case of error on the first line
-        self.request_version = version = self.default_request_version
-        self.close_connection = 1
-        requestline = str(self.raw_requestline, 'iso-8859-1')
-        requestline = requestline.rstrip('\r\n')
-        self.requestline = requestline
-        words = requestline.split()
-        if len(words) == 3:
-            command, path, version = words
-            if version[:5] != 'HTTP/':
-                self.send_error(400, "Bad request version (%r)" % version)
-                return False
-            try:
-                base_version_number = version.split('/', 1)[1]
-                version_number = base_version_number.split(".")
-                # RFC 2145 section 3.1 says there can be only one "." and
-                #   - major and minor numbers MUST be treated as
-                #      separate integers;
-                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
-                #      turn is lower than HTTP/12.3;
-                #   - Leading zeros MUST be ignored by recipients.
-                if len(version_number) != 2:
-                    raise ValueError
-                version_number = int(version_number[0]), int(version_number[1])
-            except (ValueError, IndexError):
-                self.send_error(400, "Bad request version (%r)" % version)
-                return False
-            if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
-                self.close_connection = 0
-            if version_number >= (2, 0):
-                self.send_error(505,
-                                "Invalid HTTP Version (%s)" % base_version_number)
-                return False
-        elif len(words) == 2:
-            command, path = words
-            self.close_connection = 1
-            if command != 'GET':
-                self.send_error(400,
-                                "Bad HTTP/0.9 request type (%r)" % command)
-                return False
-        elif not words:
-            return False
-        else:
-            self.send_error(400, "Bad request syntax (%r)" % requestline)
-            return False
-        self.command, self.path, self.request_version = command, path, version
-
-        # Examine the headers and look for a Connection directive.
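# (Illustrative aside, editorial: the version check above compares integer
# tuples, giving the RFC 2145 ordering, e.g.
#     int("2"), int("4")            # HTTP/2.4  -> (2, 4)
#     (2, 4) < (2, 13) < (12, 3)    # -> True
# whereas plain string comparison would wrongly give "2.13" < "2.4".)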
- try: - self.headers = http_client.parse_headers(self.rfile, - _class=self.MessageClass) - except http_client.LineTooLong: - self.send_error(400, "Line too long") - return False - - conntype = self.headers.get('Connection', "") - if conntype.lower() == 'close': - self.close_connection = 1 - elif (conntype.lower() == 'keep-alive' and - self.protocol_version >= "HTTP/1.1"): - self.close_connection = 0 - # Examine the headers and look for an Expect directive - expect = self.headers.get('Expect', "") - if (expect.lower() == "100-continue" and - self.protocol_version >= "HTTP/1.1" and - self.request_version >= "HTTP/1.1"): - if not self.handle_expect_100(): - return False - return True - - def handle_expect_100(self): - """Decide what to do with an "Expect: 100-continue" header. - - If the client is expecting a 100 Continue response, we must - respond with either a 100 Continue or a final response before - waiting for the request body. The default is to always respond - with a 100 Continue. You can behave differently (for example, - reject unauthorized requests) by overriding this method. - - This method should either return True (possibly after sending - a 100 Continue response) or send an error response and return - False. - - """ - self.send_response_only(100) - self.flush_headers() - return True - - def handle_one_request(self): - """Handle a single HTTP request. - - You normally don't need to override this method; see the class - __doc__ string for information on how to handle specific HTTP - commands such as GET and POST. - - """ - try: - self.raw_requestline = self.rfile.readline(65537) - if len(self.raw_requestline) > 65536: - self.requestline = '' - self.request_version = '' - self.command = '' - self.send_error(414) - return - if not self.raw_requestline: - self.close_connection = 1 - return - if not self.parse_request(): - # An error code has been sent, just exit - return - mname = 'do_' + self.command - if not hasattr(self, mname): - self.send_error(501, "Unsupported method (%r)" % self.command) - return - method = getattr(self, mname) - method() - self.wfile.flush() #actually send the response if not already done. - except socket.timeout as e: - #a read or a write timed out. Discard this connection - self.log_error("Request timed out: %r", e) - self.close_connection = 1 - return - - def handle(self): - """Handle multiple requests if necessary.""" - self.close_connection = 1 - - self.handle_one_request() - while not self.close_connection: - self.handle_one_request() - - def send_error(self, code, message=None): - """Send and log an error reply. - - Arguments are the error code, and a detailed message. - The detailed message defaults to the short entry matching the - response code. - - This sends an error response (so it must be called before any - output has been generated), logs the error, and finally sends - a piece of HTML explaining the error to the user. - - """ - - try: - shortmsg, longmsg = self.responses[code] - except KeyError: - shortmsg, longmsg = '???', '???' 
- if message is None: - message = shortmsg - explain = longmsg - self.log_error("code %d, message %s", code, message) - # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) - content = (self.error_message_format % - {'code': code, 'message': _quote_html(message), 'explain': explain}) - self.send_response(code, message) - self.send_header("Content-Type", self.error_content_type) - self.send_header('Connection', 'close') - self.end_headers() - if self.command != 'HEAD' and code >= 200 and code not in (204, 304): - self.wfile.write(content.encode('UTF-8', 'replace')) - - def send_response(self, code, message=None): - """Add the response header to the headers buffer and log the - response code. - - Also send two standard headers with the server software - version and the current date. - - """ - self.log_request(code) - self.send_response_only(code, message) - self.send_header('Server', self.version_string()) - self.send_header('Date', self.date_time_string()) - - def send_response_only(self, code, message=None): - """Send the response header only.""" - if message is None: - if code in self.responses: - message = self.responses[code][0] - else: - message = '' - if self.request_version != 'HTTP/0.9': - if not hasattr(self, '_headers_buffer'): - self._headers_buffer = [] - self._headers_buffer.append(("%s %d %s\r\n" % - (self.protocol_version, code, message)).encode( - 'latin-1', 'strict')) - - def send_header(self, keyword, value): - """Send a MIME header to the headers buffer.""" - if self.request_version != 'HTTP/0.9': - if not hasattr(self, '_headers_buffer'): - self._headers_buffer = [] - self._headers_buffer.append( - ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) - - if keyword.lower() == 'connection': - if value.lower() == 'close': - self.close_connection = 1 - elif value.lower() == 'keep-alive': - self.close_connection = 0 - - def end_headers(self): - """Send the blank line ending the MIME headers.""" - if self.request_version != 'HTTP/0.9': - self._headers_buffer.append(b"\r\n") - self.flush_headers() - - def flush_headers(self): - if hasattr(self, '_headers_buffer'): - self.wfile.write(b"".join(self._headers_buffer)) - self._headers_buffer = [] - - def log_request(self, code='-', size='-'): - """Log an accepted request. - - This is called by send_response(). - - """ - - self.log_message('"%s" %s %s', - self.requestline, str(code), str(size)) - - def log_error(self, format, *args): - """Log an error. - - This is called when a request cannot be fulfilled. By - default it passes the message on to log_message(). - - Arguments are the same as for log_message(). - - XXX This should go to the separate error log. - - """ - - self.log_message(format, *args) - - def log_message(self, format, *args): - """Log an arbitrary message. - - This is used by all other logging functions. Override - it if you have specific logging wishes. - - The first argument, FORMAT, is a format string for the - message to be logged. If the format string contains - any % escapes requiring parameters, they should be - specified as subsequent arguments (it's just like - printf!). - - The client ip and current date/time are prefixed to - every message. 
- - """ - - sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), - self.log_date_time_string(), - format%args)) - - def version_string(self): - """Return the server software version string.""" - return self.server_version + ' ' + self.sys_version - - def date_time_string(self, timestamp=None): - """Return the current date and time formatted for a message header.""" - if timestamp is None: - timestamp = time.time() - year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) - s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( - self.weekdayname[wd], - day, self.monthname[month], year, - hh, mm, ss) - return s - - def log_date_time_string(self): - """Return the current time formatted for logging.""" - now = time.time() - year, month, day, hh, mm, ss, x, y, z = time.localtime(now) - s = "%02d/%3s/%04d %02d:%02d:%02d" % ( - day, self.monthname[month], year, hh, mm, ss) - return s - - weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - - def address_string(self): - """Return the client address.""" - - return self.client_address[0] - - # Essentially static class variables - - # The version of the HTTP protocol we support. - # Set this to HTTP/1.1 to enable automatic keepalive - protocol_version = "HTTP/1.0" - - # MessageClass used to parse headers - MessageClass = http_client.HTTPMessage - - # Table mapping response codes to messages; entries have the - # form {code: (shortmessage, longmessage)}. - # See RFC 2616 and 6585. - responses = { - 100: ('Continue', 'Request received, please continue'), - 101: ('Switching Protocols', - 'Switching to new protocol; obey Upgrade header'), - - 200: ('OK', 'Request fulfilled, document follows'), - 201: ('Created', 'Document created, URL follows'), - 202: ('Accepted', - 'Request accepted, processing continues off-line'), - 203: ('Non-Authoritative Information', 'Request fulfilled from cache'), - 204: ('No Content', 'Request fulfilled, nothing follows'), - 205: ('Reset Content', 'Clear input form for further input.'), - 206: ('Partial Content', 'Partial content follows.'), - - 300: ('Multiple Choices', - 'Object has several resources -- see URI list'), - 301: ('Moved Permanently', 'Object moved permanently -- see URI list'), - 302: ('Found', 'Object moved temporarily -- see URI list'), - 303: ('See Other', 'Object moved -- see Method and URL list'), - 304: ('Not Modified', - 'Document has not changed since given time'), - 305: ('Use Proxy', - 'You must use proxy specified in Location to access this ' - 'resource.'), - 307: ('Temporary Redirect', - 'Object moved temporarily -- see URI list'), - - 400: ('Bad Request', - 'Bad request syntax or unsupported method'), - 401: ('Unauthorized', - 'No permission -- see authorization schemes'), - 402: ('Payment Required', - 'No payment -- see charging schemes'), - 403: ('Forbidden', - 'Request forbidden -- authorization will not help'), - 404: ('Not Found', 'Nothing matches the given URI'), - 405: ('Method Not Allowed', - 'Specified method is invalid for this resource.'), - 406: ('Not Acceptable', 'URI not available in preferred format.'), - 407: ('Proxy Authentication Required', 'You must authenticate with ' - 'this proxy before proceeding.'), - 408: ('Request Timeout', 'Request timed out; try again later.'), - 409: ('Conflict', 'Request conflict.'), - 410: ('Gone', - 'URI no longer exists and has been permanently removed.'), - 411: ('Length Required', 'Client must specify 
Content-Length.'), - 412: ('Precondition Failed', 'Precondition in headers is false.'), - 413: ('Request Entity Too Large', 'Entity is too large.'), - 414: ('Request-URI Too Long', 'URI is too long.'), - 415: ('Unsupported Media Type', 'Entity body in unsupported format.'), - 416: ('Requested Range Not Satisfiable', - 'Cannot satisfy request range.'), - 417: ('Expectation Failed', - 'Expect condition could not be satisfied.'), - 428: ('Precondition Required', - 'The origin server requires the request to be conditional.'), - 429: ('Too Many Requests', 'The user has sent too many requests ' - 'in a given amount of time ("rate limiting").'), - 431: ('Request Header Fields Too Large', 'The server is unwilling to ' - 'process the request because its header fields are too large.'), - - 500: ('Internal Server Error', 'Server got itself in trouble'), - 501: ('Not Implemented', - 'Server does not support this operation'), - 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'), - 503: ('Service Unavailable', - 'The server cannot process the request due to a high load'), - 504: ('Gateway Timeout', - 'The gateway server did not receive a timely response'), - 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'), - 511: ('Network Authentication Required', - 'The client needs to authenticate to gain network access.'), - } - - -class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): - - """Simple HTTP request handler with GET and HEAD commands. - - This serves files from the current directory and any of its - subdirectories. The MIME type for files is determined by - calling the .guess_type() method. - - The GET and HEAD requests are identical except that the HEAD - request omits the actual contents of the file. - - """ - - server_version = "SimpleHTTP/" + __version__ - - def do_GET(self): - """Serve a GET request.""" - f = self.send_head() - if f: - self.copyfile(f, self.wfile) - f.close() - - def do_HEAD(self): - """Serve a HEAD request.""" - f = self.send_head() - if f: - f.close() - - def send_head(self): - """Common code for GET and HEAD commands. - - This sends the response code and MIME headers. - - Return value is either a file object (which has to be copied - to the outputfile by the caller unless the command was HEAD, - and must be closed by the caller under all circumstances), or - None, in which case the caller has nothing further to do. - - """ - path = self.translate_path(self.path) - f = None - if os.path.isdir(path): - if not self.path.endswith('/'): - # redirect browser - doing basically what apache does - self.send_response(301) - self.send_header("Location", self.path + "/") - self.end_headers() - return None - for index in "index.html", "index.htm": - index = os.path.join(path, index) - if os.path.exists(index): - path = index - break - else: - return self.list_directory(path) - ctype = self.guess_type(path) - try: - f = open(path, 'rb') - except IOError: - self.send_error(404, "File not found") - return None - self.send_response(200) - self.send_header("Content-type", ctype) - fs = os.fstat(f.fileno()) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) - self.end_headers() - return f - - def list_directory(self, path): - """Helper to produce a directory listing (absent index.html). - - Return value is either a file object, or None (indicating an - error). In either case, the headers are sent, making the - interface the same as for send_head(). 
- - """ - try: - list = os.listdir(path) - except os.error: - self.send_error(404, "No permission to list directory") - return None - list.sort(key=lambda a: a.lower()) - r = [] - displaypath = html.escape(urllib_parse.unquote(self.path)) - enc = sys.getfilesystemencoding() - title = 'Directory listing for %s' % displaypath - r.append('') - r.append('\n') - r.append('' % enc) - r.append('%s\n' % title) - r.append('\n

%s

' % title) - r.append('
\n
    ') - for name in list: - fullname = os.path.join(path, name) - displayname = linkname = name - # Append / for directories or @ for symbolic links - if os.path.isdir(fullname): - displayname = name + "/" - linkname = name + "/" - if os.path.islink(fullname): - displayname = name + "@" - # Note: a link to a directory displays with @ and links with / - r.append('
  • %s
  • ' - % (urllib_parse.quote(linkname), html.escape(displayname))) - # # Use this instead: - # r.append('
  • %s
  • ' - # % (urllib.quote(linkname), cgi.escape(displayname))) - r.append('
\n
\n\n\n') - encoded = '\n'.join(r).encode(enc) - f = io.BytesIO() - f.write(encoded) - f.seek(0) - self.send_response(200) - self.send_header("Content-type", "text/html; charset=%s" % enc) - self.send_header("Content-Length", str(len(encoded))) - self.end_headers() - return f - - def translate_path(self, path): - """Translate a /-separated PATH to the local filename syntax. - - Components that mean special things to the local file system - (e.g. drive or directory names) are ignored. (XXX They should - probably be diagnosed.) - - """ - # abandon query parameters - path = path.split('?',1)[0] - path = path.split('#',1)[0] - path = posixpath.normpath(urllib_parse.unquote(path)) - words = path.split('/') - words = filter(None, words) - path = os.getcwd() - for word in words: - drive, word = os.path.splitdrive(word) - head, word = os.path.split(word) - if word in (os.curdir, os.pardir): continue - path = os.path.join(path, word) - return path - - def copyfile(self, source, outputfile): - """Copy all data between two file objects. - - The SOURCE argument is a file object open for reading - (or anything with a read() method) and the DESTINATION - argument is a file object open for writing (or - anything with a write() method). - - The only reason for overriding this would be to change - the block size or perhaps to replace newlines by CRLF - -- note however that this the default server uses this - to copy binary data as well. - - """ - shutil.copyfileobj(source, outputfile) - - def guess_type(self, path): - """Guess the type of a file. - - Argument is a PATH (a filename). - - Return value is a string of the form type/subtype, - usable for a MIME Content-type header. - - The default implementation looks the file's extension - up in the table self.extensions_map, using application/octet-stream - as a default; however it would be permissible (if - slow) to look inside the data to make a better guess. - - """ - - base, ext = posixpath.splitext(path) - if ext in self.extensions_map: - return self.extensions_map[ext] - ext = ext.lower() - if ext in self.extensions_map: - return self.extensions_map[ext] - else: - return self.extensions_map[''] - - if not mimetypes.inited: - mimetypes.init() # try to read system mime.types - extensions_map = mimetypes.types_map.copy() - extensions_map.update({ - '': 'application/octet-stream', # Default - '.py': 'text/plain', - '.c': 'text/plain', - '.h': 'text/plain', - }) - - -# Utilities for CGIHTTPRequestHandler - -def _url_collapse_path(path): - """ - Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references and returns a colllapsed path. - - Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. - The utility of this function is limited to is_cgi method and helps - preventing some security attacks. - - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. - - Raises: IndexError if too many '..' occur within the path. - - """ - # Similar to os.path.split(os.path.normpath(path)) but specific to URL - # path semantics rather than local operating system semantics. - path_parts = path.split('/') - head_parts = [] - for part in path_parts[:-1]: - if part == '..': - head_parts.pop() # IndexError if more '..' 
than prior parts - elif part and part != '.': - head_parts.append( part ) - if path_parts: - tail_part = path_parts.pop() - if tail_part: - if tail_part == '..': - head_parts.pop() - tail_part = '' - elif tail_part == '.': - tail_part = '' - else: - tail_part = '' - - splitpath = ('/' + '/'.join(head_parts), tail_part) - collapsed_path = "/".join(splitpath) - - return collapsed_path - - - -nobody = None - -def nobody_uid(): - """Internal routine to get nobody's uid""" - global nobody - if nobody: - return nobody - try: - import pwd - except ImportError: - return -1 - try: - nobody = pwd.getpwnam('nobody')[2] - except KeyError: - nobody = 1 + max(x[2] for x in pwd.getpwall()) - return nobody - - -def executable(path): - """Test for executable file.""" - return os.access(path, os.X_OK) - - -class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): - - """Complete HTTP server with GET, HEAD and POST commands. - - GET and HEAD also support running CGI scripts. - - The POST command is *only* implemented for CGI scripts. - - """ - - # Determine platform specifics - have_fork = hasattr(os, 'fork') - - # Make rfile unbuffered -- we need to read one line and then pass - # the rest to a subprocess, so we can't use buffered input. - rbufsize = 0 - - def do_POST(self): - """Serve a POST request. - - This is only implemented for CGI scripts. - - """ - - if self.is_cgi(): - self.run_cgi() - else: - self.send_error(501, "Can only POST to CGI scripts") - - def send_head(self): - """Version of send_head that support CGI scripts""" - if self.is_cgi(): - return self.run_cgi() - else: - return SimpleHTTPRequestHandler.send_head(self) - - def is_cgi(self): - """Test whether self.path corresponds to a CGI script. - - Returns True and updates the cgi_info attribute to the tuple - (dir, rest) if self.path requires running a CGI script. - Returns False otherwise. - - If any exception is raised, the caller should assume that - self.path was rejected as invalid and act accordingly. - - The default implementation tests whether the normalized url - path begins with one of the strings in self.cgi_directories - (and the next character is a '/' or the end of the string). - - """ - collapsed_path = _url_collapse_path(self.path) - dir_sep = collapsed_path.find('/', 1) - head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] - if head in self.cgi_directories: - self.cgi_info = head, tail - return True - return False - - - cgi_directories = ['/cgi-bin', '/htbin'] - - def is_executable(self, path): - """Test whether argument path is an executable file.""" - return executable(path) - - def is_python(self, path): - """Test whether argument path is a Python script.""" - head, tail = os.path.splitext(path) - return tail.lower() in (".py", ".pyw") - - def run_cgi(self): - """Execute a CGI script.""" - path = self.path - dir, rest = self.cgi_info - - i = path.find('/', len(dir) + 1) - while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] - - scriptdir = self.translate_path(nextdir) - if os.path.isdir(scriptdir): - dir, rest = nextdir, nextrest - i = path.find('/', len(dir) + 1) - else: - break - - # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' - - # dissect the part after the directory name into a script name & - # a possible additional path, to be stored in PATH_INFO. 
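To make the dissection step just below concrete, here is a minimal standalone sketch of the SCRIPT_NAME / PATH_INFO split that run_cgi() performs. The helper name `split_script` is invented for illustration; the slicing logic mirrors the code that follows.

```python
# Hypothetical helper mirroring run_cgi()'s split of the URL remainder
# ("rest": the part after the CGI directory, query already stripped)
# into the script name and the extra path that becomes PATH_INFO.
def split_script(rest):
    i = rest.find('/')
    if i >= 0:
        return rest[:i], rest[i:]    # (script, PATH_INFO)
    return rest, ''

print(split_script('script.py/extra/info'))  # ('script.py', '/extra/info')
print(split_script('script.py'))             # ('script.py', '')
```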
- i = rest.find('/') - if i >= 0: - script, rest = rest[:i], rest[i:] - else: - script, rest = rest, '' - - scriptname = dir + '/' + script - scriptfile = self.translate_path(scriptname) - if not os.path.exists(scriptfile): - self.send_error(404, "No such CGI script (%r)" % scriptname) - return - if not os.path.isfile(scriptfile): - self.send_error(403, "CGI script is not a plain file (%r)" % - scriptname) - return - ispy = self.is_python(scriptname) - if self.have_fork or not ispy: - if not self.is_executable(scriptfile): - self.send_error(403, "CGI script is not executable (%r)" % - scriptname) - return - - # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html - # XXX Much of the following could be prepared ahead of time! - env = copy.deepcopy(os.environ) - env['SERVER_SOFTWARE'] = self.version_string() - env['SERVER_NAME'] = self.server.server_name - env['GATEWAY_INTERFACE'] = 'CGI/1.1' - env['SERVER_PROTOCOL'] = self.protocol_version - env['SERVER_PORT'] = str(self.server.server_port) - env['REQUEST_METHOD'] = self.command - uqrest = urllib_parse.unquote(rest) - env['PATH_INFO'] = uqrest - env['PATH_TRANSLATED'] = self.translate_path(uqrest) - env['SCRIPT_NAME'] = scriptname - if query: - env['QUERY_STRING'] = query - env['REMOTE_ADDR'] = self.client_address[0] - authorization = self.headers.get("authorization") - if authorization: - authorization = authorization.split() - if len(authorization) == 2: - import base64, binascii - env['AUTH_TYPE'] = authorization[0] - if authorization[0].lower() == "basic": - try: - authorization = authorization[1].encode('ascii') - if utils.PY3: - # In Py3.3, was: - authorization = base64.decodebytes(authorization).\ - decode('ascii') - else: - # Backport to Py2.7: - authorization = base64.decodestring(authorization).\ - decode('ascii') - except (binascii.Error, UnicodeError): - pass - else: - authorization = authorization.split(':') - if len(authorization) == 2: - env['REMOTE_USER'] = authorization[0] - # XXX REMOTE_IDENT - if self.headers.get('content-type') is None: - env['CONTENT_TYPE'] = self.headers.get_content_type() - else: - env['CONTENT_TYPE'] = self.headers['content-type'] - length = self.headers.get('content-length') - if length: - env['CONTENT_LENGTH'] = length - referer = self.headers.get('referer') - if referer: - env['HTTP_REFERER'] = referer - accept = [] - for line in self.headers.getallmatchingheaders('accept'): - if line[:1] in "\t\n\r ": - accept.append(line.strip()) - else: - accept = accept + line[7:].split(',') - env['HTTP_ACCEPT'] = ','.join(accept) - ua = self.headers.get('user-agent') - if ua: - env['HTTP_USER_AGENT'] = ua - co = filter(None, self.headers.get_all('cookie', [])) - cookie_str = ', '.join(co) - if cookie_str: - env['HTTP_COOKIE'] = cookie_str - # XXX Other HTTP_* headers - # Since we're setting the env in the parent, provide empty - # values to override previously set values - for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', - 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'): - env.setdefault(k, "") - - self.send_response(200, "Script output follows") - self.flush_headers() - - decoded_query = query.replace('+', ' ') - - if self.have_fork: - # Unix -- fork as we should - args = [script] - if '=' not in decoded_query: - args.append(decoded_query) - nobody = nobody_uid() - self.wfile.flush() # Always flush before forking - pid = os.fork() - if pid != 0: - # Parent - pid, sts = os.waitpid(pid, 0) - # throw away additional data [see bug #427345] - while select.select([self.rfile], [], [], 0)[0]: - if not 
self.rfile.read(1): - break - if sts: - self.log_error("CGI script exit status %#x", sts) - return - # Child - try: - try: - os.setuid(nobody) - except os.error: - pass - os.dup2(self.rfile.fileno(), 0) - os.dup2(self.wfile.fileno(), 1) - os.execve(scriptfile, args, env) - except: - self.server.handle_error(self.request, self.client_address) - os._exit(127) - - else: - # Non-Unix -- use subprocess - import subprocess - cmdline = [scriptfile] - if self.is_python(scriptfile): - interp = sys.executable - if interp.lower().endswith("w.exe"): - # On Windows, use python.exe, not pythonw.exe - interp = interp[:-5] + interp[-4:] - cmdline = [interp, '-u'] + cmdline - if '=' not in query: - cmdline.append(query) - self.log_message("command: %s", subprocess.list2cmdline(cmdline)) - try: - nbytes = int(length) - except (TypeError, ValueError): - nbytes = 0 - p = subprocess.Popen(cmdline, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env = env - ) - if self.command.lower() == "post" and nbytes > 0: - data = self.rfile.read(nbytes) - else: - data = None - # throw away additional data [see bug #427345] - while select.select([self.rfile._sock], [], [], 0)[0]: - if not self.rfile._sock.recv(1): - break - stdout, stderr = p.communicate(data) - self.wfile.write(stdout) - if stderr: - self.log_error('%s', stderr) - p.stderr.close() - p.stdout.close() - status = p.returncode - if status: - self.log_error("CGI script exit status %#x", status) - else: - self.log_message("CGI script exited OK") - - -def test(HandlerClass = BaseHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0", port=8000): - """Test the HTTP request handler class. - - This runs an HTTP server on port 8000 (or the first command line - argument). - - """ - server_address = ('', port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print("Serving HTTP on", sa[0], "port", sa[1], "...") - try: - httpd.serve_forever() - except KeyboardInterrupt: - print("\nKeyboard interrupt received, exiting.") - httpd.server_close() - sys.exit(0) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--cgi', action='store_true', - help='Run as CGI Server') - parser.add_argument('port', action='store', - default=8000, type=int, - nargs='?', - help='Specify alternate port [default: 8000]') - args = parser.parse_args() - if args.cgi: - test(HandlerClass=CGIHTTPRequestHandler, port=args.port) - else: - test(HandlerClass=SimpleHTTPRequestHandler, port=args.port) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/misc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/misc.py deleted file mode 100755 index 098a066..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/misc.py +++ /dev/null @@ -1,944 +0,0 @@ -""" -Miscellaneous function (re)definitions from the Py3.4+ standard library -for Python 2.6/2.7. 
- -- math.ceil (for Python 2.7) -- collections.OrderedDict (for Python 2.6) -- collections.Counter (for Python 2.6) -- collections.ChainMap (for all versions prior to Python 3.3) -- itertools.count (for Python 2.6, with step parameter) -- subprocess.check_output (for Python 2.6) -- reprlib.recursive_repr (for Python 2.6+) -- functools.cmp_to_key (for Python 2.6) -""" - -from __future__ import absolute_import - -import subprocess -from math import ceil as oldceil - -from operator import itemgetter as _itemgetter, eq as _eq -import sys -import heapq as _heapq -from _weakref import proxy as _proxy -from itertools import repeat as _repeat, chain as _chain, starmap as _starmap -from socket import getaddrinfo, SOCK_STREAM, error, socket - -from future.utils import iteritems, itervalues, PY2, PY26, PY3 - -if PY2: - from collections import Mapping, MutableMapping -else: - from collections.abc import Mapping, MutableMapping - - -def ceil(x): - """ - Return the ceiling of x as an int. - This is the smallest integral value >= x. - """ - return int(oldceil(x)) - - -######################################################################## -### reprlib.recursive_repr decorator from Py3.4 -######################################################################## - -from itertools import islice - -if PY3: - try: - from _thread import get_ident - except ImportError: - from _dummy_thread import get_ident -else: - try: - from thread import get_ident - except ImportError: - from dummy_thread import get_ident - - -def recursive_repr(fillvalue='...'): - 'Decorator to make a repr function return fillvalue for a recursive call' - - def decorating_function(user_function): - repr_running = set() - - def wrapper(self): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - - # Can't use functools.wraps() here because of bootstrap issues - wrapper.__module__ = getattr(user_function, '__module__') - wrapper.__doc__ = getattr(user_function, '__doc__') - wrapper.__name__ = getattr(user_function, '__name__') - wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) - return wrapper - - return decorating_function - - -################################################################################ -### OrderedDict -################################################################################ - -class _Link(object): - __slots__ = 'prev', 'next', 'key', '__weakref__' - -class OrderedDict(dict): - 'Dictionary that remembers insertion order' - # An inherited dict maps keys to values. - # The inherited dict provides __getitem__, __len__, __contains__, and get. - # The remaining methods are order-aware. - # Big-O running times for all methods are the same as regular dictionaries. - - # The internal self.__map dict maps keys to links in a doubly linked list. - # The circular doubly linked list starts and ends with a sentinel element. - # The sentinel element never gets deleted (this simplifies the algorithm). - # The sentinel is in self.__hardroot with a weakref proxy in self.__root. - # The prev links are weakref proxies (to prevent circular references). - # Individual links are kept alive by the hard reference in self.__map. - # Those hard references disappear when a key is deleted from an OrderedDict. - - def __init__(*args, **kwds): - '''Initialize an ordered dictionary. 
The signature is the same as - regular dictionaries, but keyword arguments are not recommended because - their insertion order is arbitrary. - - ''' - if not args: - raise TypeError("descriptor '__init__' of 'OrderedDict' object " - "needs an argument") - self = args[0] - args = args[1:] - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__root - except AttributeError: - self.__hardroot = _Link() - self.__root = root = _proxy(self.__hardroot) - root.prev = root.next = root - self.__map = {} - self.__update(*args, **kwds) - - def __setitem__(self, key, value, - dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): - 'od.__setitem__(i, y) <==> od[i]=y' - # Setting a new item creates a new link at the end of the linked list, - # and the inherited dictionary is updated with the new key/value pair. - if key not in self: - self.__map[key] = link = Link() - root = self.__root - last = root.prev - link.prev, link.next, link.key = last, root, key - last.next = link - root.prev = proxy(link) - dict_setitem(self, key, value) - - def __delitem__(self, key, dict_delitem=dict.__delitem__): - 'od.__delitem__(y) <==> del od[y]' - # Deleting an existing item uses self.__map to find the link which gets - # removed by updating the links in the predecessor and successor nodes. - dict_delitem(self, key) - link = self.__map.pop(key) - link_prev = link.prev - link_next = link.next - link_prev.next = link_next - link_next.prev = link_prev - - def __iter__(self): - 'od.__iter__() <==> iter(od)' - # Traverse the linked list in order. - root = self.__root - curr = root.next - while curr is not root: - yield curr.key - curr = curr.next - - def __reversed__(self): - 'od.__reversed__() <==> reversed(od)' - # Traverse the linked list in reverse order. - root = self.__root - curr = root.prev - while curr is not root: - yield curr.key - curr = curr.prev - - def clear(self): - 'od.clear() -> None. Remove all items from od.' - root = self.__root - root.prev = root.next = root - self.__map.clear() - dict.clear(self) - - def popitem(self, last=True): - '''od.popitem() -> (k, v), return and remove a (key, value) pair. - Pairs are returned in LIFO order if last is true or FIFO order if false. - - ''' - if not self: - raise KeyError('dictionary is empty') - root = self.__root - if last: - link = root.prev - link_prev = link.prev - link_prev.next = root - root.prev = link_prev - else: - link = root.next - link_next = link.next - root.next = link_next - link_next.prev = root - key = link.key - del self.__map[key] - value = dict.pop(self, key) - return key, value - - def move_to_end(self, key, last=True): - '''Move an existing element to the end (or beginning if last==False). - - Raises KeyError if the element does not exist. - When last=True, acts like a fast version of self[key]=self.pop(key). 
- - ''' - link = self.__map[key] - link_prev = link.prev - link_next = link.next - link_prev.next = link_next - link_next.prev = link_prev - root = self.__root - if last: - last = root.prev - link.prev = last - link.next = root - last.next = root.prev = link - else: - first = root.next - link.prev = root - link.next = first - root.next = first.prev = link - - def __sizeof__(self): - sizeof = sys.getsizeof - n = len(self) + 1 # number of links including root - size = sizeof(self.__dict__) # instance dictionary - size += sizeof(self.__map) * 2 # internal dict and inherited dict - size += sizeof(self.__hardroot) * n # link objects - size += sizeof(self.__root) * n # proxy objects - return size - - update = __update = MutableMapping.update - keys = MutableMapping.keys - values = MutableMapping.values - items = MutableMapping.items - __ne__ = MutableMapping.__ne__ - - __marker = object() - - def pop(self, key, default=__marker): - '''od.pop(k[,d]) -> v, remove specified key and return the corresponding - value. If key is not found, d is returned if given, otherwise KeyError - is raised. - - ''' - if key in self: - result = self[key] - del self[key] - return result - if default is self.__marker: - raise KeyError(key) - return default - - def setdefault(self, key, default=None): - 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' - if key in self: - return self[key] - self[key] = default - return default - - @recursive_repr() - def __repr__(self): - 'od.__repr__() <==> repr(od)' - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, list(self.items())) - - def __reduce__(self): - 'Return state information for pickling' - inst_dict = vars(self).copy() - for k in vars(OrderedDict()): - inst_dict.pop(k, None) - return self.__class__, (), inst_dict or None, None, iter(self.items()) - - def copy(self): - 'od.copy() -> a shallow copy of od' - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. - If not specified, the value defaults to None. - - ''' - self = cls() - for key in iterable: - self[key] = value - return self - - def __eq__(self, other): - '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive - while comparison to a regular mapping is order-insensitive. - - ''' - if isinstance(other, OrderedDict): - return dict.__eq__(self, other) and all(map(_eq, self, other)) - return dict.__eq__(self, other) - - -# {{{ http://code.activestate.com/recipes/576611/ (r11) - -try: - from operator import itemgetter - from heapq import nlargest -except ImportError: - pass - -######################################################################## -### Counter -######################################################################## - -def _count_elements(mapping, iterable): - 'Tally elements from the iterable.' - mapping_get = mapping.get - for elem in iterable: - mapping[elem] = mapping_get(elem, 0) + 1 - -class Counter(dict): - '''Dict subclass for counting hashable items. Sometimes called a bag - or multiset. Elements are stored as dictionary keys and their counts - are stored as dictionary values. 
- - >>> c = Counter('abcdeabcdabcaba') # count elements from a string - - >>> c.most_common(3) # three most common elements - [('a', 5), ('b', 4), ('c', 3)] - >>> sorted(c) # list all unique elements - ['a', 'b', 'c', 'd', 'e'] - >>> ''.join(sorted(c.elements())) # list elements with repetitions - 'aaaaabbbbcccdde' - >>> sum(c.values()) # total of all counts - 15 - - >>> c['a'] # count of letter 'a' - 5 - >>> for elem in 'shazam': # update counts from an iterable - ... c[elem] += 1 # by adding 1 to each element's count - >>> c['a'] # now there are seven 'a' - 7 - >>> del c['b'] # remove all 'b' - >>> c['b'] # now there are zero 'b' - 0 - - >>> d = Counter('simsalabim') # make another counter - >>> c.update(d) # add in the second counter - >>> c['a'] # now there are nine 'a' - 9 - - >>> c.clear() # empty the counter - >>> c - Counter() - - Note: If a count is set to zero or reduced to zero, it will remain - in the counter until the entry is deleted or the counter is cleared: - - >>> c = Counter('aaabbc') - >>> c['b'] -= 2 # reduce the count of 'b' by two - >>> c.most_common() # 'b' is still in, but its count is zero - [('a', 3), ('c', 1), ('b', 0)] - - ''' - # References: - # http://en.wikipedia.org/wiki/Multiset - # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html - # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm - # http://code.activestate.com/recipes/259174/ - # Knuth, TAOCP Vol. II section 4.6.3 - - def __init__(*args, **kwds): - '''Create a new, empty Counter object. And if given, count elements - from an input iterable. Or, initialize the count from another mapping - of elements to their counts. - - >>> c = Counter() # a new, empty counter - >>> c = Counter('gallahad') # a new counter from an iterable - >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping - >>> c = Counter(a=4, b=2) # a new counter from keyword args - - ''' - if not args: - raise TypeError("descriptor '__init__' of 'Counter' object " - "needs an argument") - self = args[0] - args = args[1:] - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - super(Counter, self).__init__() - self.update(*args, **kwds) - - def __missing__(self, key): - 'The count of elements not in the Counter is zero.' - # Needed so that self[missing_item] does not raise KeyError - return 0 - - def most_common(self, n=None): - '''List the n most common elements and their counts from the most - common to the least. If n is None, then list all element counts. - - >>> Counter('abcdeabcdabcaba').most_common(3) - [('a', 5), ('b', 4), ('c', 3)] - - ''' - # Emulate Bag.sortedByCount from Smalltalk - if n is None: - return sorted(self.items(), key=_itemgetter(1), reverse=True) - return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) - - def elements(self): - '''Iterator over elements repeating each as many times as its count. - - >>> c = Counter('ABCABC') - >>> sorted(c.elements()) - ['A', 'A', 'B', 'B', 'C', 'C'] - - # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 - >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) - >>> product = 1 - >>> for factor in prime_factors.elements(): # loop over factors - ... product *= factor # and multiply them - >>> product - 1836 - - Note, if an element's count has been set to zero or is a negative - number, elements() will ignore it. - - ''' - # Emulate Bag.do from Smalltalk and Multiset.begin from C++. 
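As an aside, the one-line body of elements() that follows can be illustrated with plain stdlib itertools, nothing backport-specific; `items` below is sample data for the sketch:

```python
from itertools import chain, repeat, starmap

# starmap(repeat, items) yields repeat('a', 3) and repeat('b', 2);
# chain.from_iterable then flattens them into one element stream.
items = [('a', 3), ('b', 2)]
print(list(chain.from_iterable(starmap(repeat, items))))
# -> ['a', 'a', 'a', 'b', 'b']
```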
- return _chain.from_iterable(_starmap(_repeat, self.items())) - - # Override dict methods where necessary - - @classmethod - def fromkeys(cls, iterable, v=None): - # There is no equivalent method for counters because setting v=1 - # means that no element can have a count greater than one. - raise NotImplementedError( - 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') - - def update(*args, **kwds): - '''Like dict.update() but add counts instead of replacing them. - - Source can be an iterable, a dictionary, or another Counter instance. - - >>> c = Counter('which') - >>> c.update('witch') # add elements from another iterable - >>> d = Counter('watch') - >>> c.update(d) # add elements from another counter - >>> c['h'] # four 'h' in which, witch, and watch - 4 - - ''' - # The regular dict.update() operation makes no sense here because the - # replace behavior results in the some of original untouched counts - # being mixed-in with all of the other counts for a mismash that - # doesn't have a straight-forward interpretation in most counting - # contexts. Instead, we implement straight-addition. Both the inputs - # and outputs are allowed to contain zero and negative counts. - - if not args: - raise TypeError("descriptor 'update' of 'Counter' object " - "needs an argument") - self = args[0] - args = args[1:] - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - iterable = args[0] if args else None - if iterable is not None: - if isinstance(iterable, Mapping): - if self: - self_get = self.get - for elem, count in iterable.items(): - self[elem] = count + self_get(elem, 0) - else: - super(Counter, self).update(iterable) # fast path when counter is empty - else: - _count_elements(self, iterable) - if kwds: - self.update(kwds) - - def subtract(*args, **kwds): - '''Like dict.update() but subtracts counts instead of replacing them. - Counts can be reduced below zero. Both the inputs and outputs are - allowed to contain zero and negative counts. - - Source can be an iterable, a dictionary, or another Counter instance. - - >>> c = Counter('which') - >>> c.subtract('witch') # subtract elements from another iterable - >>> c.subtract(Counter('watch')) # subtract elements from another counter - >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch - 0 - >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch - -1 - - ''' - if not args: - raise TypeError("descriptor 'subtract' of 'Counter' object " - "needs an argument") - self = args[0] - args = args[1:] - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - iterable = args[0] if args else None - if iterable is not None: - self_get = self.get - if isinstance(iterable, Mapping): - for elem, count in iterable.items(): - self[elem] = self_get(elem, 0) - count - else: - for elem in iterable: - self[elem] = self_get(elem, 0) - 1 - if kwds: - self.subtract(kwds) - - def copy(self): - 'Return a shallow copy.' - return self.__class__(self) - - def __reduce__(self): - return self.__class__, (dict(self),) - - def __delitem__(self, elem): - 'Like dict.__delitem__() but does not raise KeyError for missing values.' 
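The add-rather-than-replace semantics of update() and subtract() documented above can be sanity-checked against the stdlib collections.Counter, which behaves the same way for these operations:

```python
from collections import Counter

c = Counter('which')             # {'w': 1, 'h': 2, 'i': 1, 'c': 1}
c.update('witch')                # counts are added, not replaced
c.subtract(Counter('watch'))     # counts may drop below zero
print(c['h'], c['a'])            # 2 -1
```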
- if elem in self: - super(Counter, self).__delitem__(elem) - - def __repr__(self): - if not self: - return '%s()' % self.__class__.__name__ - try: - items = ', '.join(map('%r: %r'.__mod__, self.most_common())) - return '%s({%s})' % (self.__class__.__name__, items) - except TypeError: - # handle case where values are not orderable - return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) - - # Multiset-style mathematical operations discussed in: - # Knuth TAOCP Volume II section 4.6.3 exercise 19 - # and at http://en.wikipedia.org/wiki/Multiset - # - # Outputs guaranteed to only include positive counts. - # - # To strip negative and zero counts, add-in an empty counter: - # c += Counter() - - def __add__(self, other): - '''Add counts from two counters. - - >>> Counter('abbb') + Counter('bcc') - Counter({'b': 4, 'c': 2, 'a': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - newcount = count + other[elem] - if newcount > 0: - result[elem] = newcount - for elem, count in other.items(): - if elem not in self and count > 0: - result[elem] = count - return result - - def __sub__(self, other): - ''' Subtract count, but keep only results with positive counts. - - >>> Counter('abbbc') - Counter('bccd') - Counter({'b': 2, 'a': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - newcount = count - other[elem] - if newcount > 0: - result[elem] = newcount - for elem, count in other.items(): - if elem not in self and count < 0: - result[elem] = 0 - count - return result - - def __or__(self, other): - '''Union is the maximum of value in either of the input counters. - - >>> Counter('abbb') | Counter('bcc') - Counter({'b': 3, 'c': 2, 'a': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - other_count = other[elem] - newcount = other_count if count < other_count else count - if newcount > 0: - result[elem] = newcount - for elem, count in other.items(): - if elem not in self and count > 0: - result[elem] = count - return result - - def __and__(self, other): - ''' Intersection is the minimum of corresponding counts. - - >>> Counter('abbb') & Counter('bcc') - Counter({'b': 1}) - - ''' - if not isinstance(other, Counter): - return NotImplemented - result = Counter() - for elem, count in self.items(): - other_count = other[elem] - newcount = count if count < other_count else other_count - if newcount > 0: - result[elem] = newcount - return result - - def __pos__(self): - 'Adds an empty counter, effectively stripping negative and zero counts' - return self + Counter() - - def __neg__(self): - '''Subtracts from an empty counter. Strips positive and zero counts, - and flips the sign on negative counts. - - ''' - return Counter() - self - - def _keep_positive(self): - '''Internal method to strip elements with a negative or zero count''' - nonpositive = [elem for elem, count in self.items() if not count > 0] - for elem in nonpositive: - del self[elem] - return self - - def __iadd__(self, other): - '''Inplace add from another counter, keeping only positive counts. - - >>> c = Counter('abbb') - >>> c += Counter('bcc') - >>> c - Counter({'b': 4, 'c': 2, 'a': 1}) - - ''' - for elem, count in other.items(): - self[elem] += count - return self._keep_positive() - - def __isub__(self, other): - '''Inplace subtract counter, but keep only results with positive counts. 
- - >>> c = Counter('abbbc') - >>> c -= Counter('bccd') - >>> c - Counter({'b': 2, 'a': 1}) - - ''' - for elem, count in other.items(): - self[elem] -= count - return self._keep_positive() - - def __ior__(self, other): - '''Inplace union is the maximum of value from either counter. - - >>> c = Counter('abbb') - >>> c |= Counter('bcc') - >>> c - Counter({'b': 3, 'c': 2, 'a': 1}) - - ''' - for elem, other_count in other.items(): - count = self[elem] - if other_count > count: - self[elem] = other_count - return self._keep_positive() - - def __iand__(self, other): - '''Inplace intersection is the minimum of corresponding counts. - - >>> c = Counter('abbb') - >>> c &= Counter('bcc') - >>> c - Counter({'b': 1}) - - ''' - for elem, count in self.items(): - other_count = other[elem] - if other_count < count: - self[elem] = other_count - return self._keep_positive() - - -def check_output(*popenargs, **kwargs): - """ - For Python 2.6 compatibility: see - http://stackoverflow.com/questions/4814970/ - """ - - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise subprocess.CalledProcessError(retcode, cmd) - return output - - -def count(start=0, step=1): - """ - ``itertools.count`` in Py 2.6 doesn't accept a step - parameter. This is an enhanced version of ``itertools.count`` - for Py2.6 equivalent to ``itertools.count`` in Python 2.7+. - """ - while True: - yield start - start += step - - -######################################################################## -### ChainMap (helper for configparser and string.Template) -### From the Py3.4 source code. See also: -### https://github.com/kkxue/Py2ChainMap/blob/master/py2chainmap.py -######################################################################## - -class ChainMap(MutableMapping): - ''' A ChainMap groups multiple dicts (or other mappings) together - to create a single, updateable view. - - The underlying mappings are stored in a list. That list is public and can - accessed or updated using the *maps* attribute. There is no other state. - - Lookups search the underlying mappings successively until a key is found. - In contrast, writes, updates, and deletions only operate on the first - mapping. - - ''' - - def __init__(self, *maps): - '''Initialize a ChainMap by setting *maps* to the given mappings. - If no mappings are provided, a single empty dictionary is used. 
- - ''' - self.maps = list(maps) or [{}] # always at least one map - - def __missing__(self, key): - raise KeyError(key) - - def __getitem__(self, key): - for mapping in self.maps: - try: - return mapping[key] # can't use 'key in mapping' with defaultdict - except KeyError: - pass - return self.__missing__(key) # support subclasses that define __missing__ - - def get(self, key, default=None): - return self[key] if key in self else default - - def __len__(self): - return len(set().union(*self.maps)) # reuses stored hash values if possible - - def __iter__(self): - return iter(set().union(*self.maps)) - - def __contains__(self, key): - return any(key in m for m in self.maps) - - def __bool__(self): - return any(self.maps) - - # Py2 compatibility: - __nonzero__ = __bool__ - - @recursive_repr() - def __repr__(self): - return '{0.__class__.__name__}({1})'.format( - self, ', '.join(map(repr, self.maps))) - - @classmethod - def fromkeys(cls, iterable, *args): - 'Create a ChainMap with a single dict created from the iterable.' - return cls(dict.fromkeys(iterable, *args)) - - def copy(self): - 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' - return self.__class__(self.maps[0].copy(), *self.maps[1:]) - - __copy__ = copy - - def new_child(self, m=None): # like Django's Context.push() - ''' - New ChainMap with a new map followed by all previous maps. If no - map is provided, an empty dict is used. - ''' - if m is None: - m = {} - return self.__class__(m, *self.maps) - - @property - def parents(self): # like Django's Context.pop() - 'New ChainMap from maps[1:].' - return self.__class__(*self.maps[1:]) - - def __setitem__(self, key, value): - self.maps[0][key] = value - - def __delitem__(self, key): - try: - del self.maps[0][key] - except KeyError: - raise KeyError('Key not found in the first mapping: {0!r}'.format(key)) - - def popitem(self): - 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' - try: - return self.maps[0].popitem() - except KeyError: - raise KeyError('No keys found in the first mapping.') - - def pop(self, key, *args): - 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' - try: - return self.maps[0].pop(key, *args) - except KeyError: - raise KeyError('Key not found in the first mapping: {0!r}'.format(key)) - - def clear(self): - 'Clear maps[0], leaving maps[1:] intact.' - self.maps[0].clear() - - -# Re-use the same sentinel as in the Python stdlib socket module: -from socket import _GLOBAL_DEFAULT_TIMEOUT -# Was: _GLOBAL_DEFAULT_TIMEOUT = object() - - -def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, - source_address=None): - """Backport of 3-argument create_connection() for Py2.6. - - Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - An host of '' or port 0 tells the OS to use the default. 
- """ - - host, port = address - err = None - for res in getaddrinfo(host, port, 0, SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket(af, socktype, proto) - if timeout is not _GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - return sock - - except error as _: - err = _ - if sock is not None: - sock.close() - - if err is not None: - raise err - else: - raise error("getaddrinfo returns an empty list") - -# Backport from Py2.7 for Py2.6: -def cmp_to_key(mycmp): - """Convert a cmp= function into a key= function""" - class K(object): - __slots__ = ['obj'] - def __init__(self, obj, *args): - self.obj = obj - def __lt__(self, other): - return mycmp(self.obj, other.obj) < 0 - def __gt__(self, other): - return mycmp(self.obj, other.obj) > 0 - def __eq__(self, other): - return mycmp(self.obj, other.obj) == 0 - def __le__(self, other): - return mycmp(self.obj, other.obj) <= 0 - def __ge__(self, other): - return mycmp(self.obj, other.obj) >= 0 - def __ne__(self, other): - return mycmp(self.obj, other.obj) != 0 - def __hash__(self): - raise TypeError('hash not implemented') - return K - -# Back up our definitions above in case they're useful -_OrderedDict = OrderedDict -_Counter = Counter -_check_output = check_output -_count = count -_ceil = ceil -__count_elements = _count_elements -_recursive_repr = recursive_repr -_ChainMap = ChainMap -_create_connection = create_connection -_cmp_to_key = cmp_to_key - -# Overwrite the definitions above with the usual ones -# from the standard library: -if sys.version_info >= (2, 7): - from collections import OrderedDict, Counter - from itertools import count - from functools import cmp_to_key - try: - from subprocess import check_output - except ImportError: - # Not available. This happens with Google App Engine: see issue #231 - pass - from socket import create_connection - -if sys.version_info >= (3, 0): - from math import ceil - from collections import _count_elements - -if sys.version_info >= (3, 3): - from reprlib import recursive_repr - from collections import ChainMap diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socket.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socket.py deleted file mode 100755 index 930e1da..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socket.py +++ /dev/null @@ -1,454 +0,0 @@ -# Wrapper module for _socket, providing some additional facilities -# implemented in Python. - -"""\ -This module provides socket operations and some related functions. -On Unix, it supports IP (Internet Protocol) and Unix domain sockets. -On other systems, it only supports IP. Functions specific for a -socket are available as methods of the socket object. - -Functions: - -socket() -- create a new socket object -socketpair() -- create a pair of new socket objects [*] -fromfd() -- create a socket object from an open file descriptor [*] -fromshare() -- create a socket object from data received from socket.share() [*] -gethostname() -- return the current hostname -gethostbyname() -- map a hostname to its IP number -gethostbyaddr() -- map an IP number or hostname to DNS info -getservbyname() -- map a service name and a protocol name to a port number -getprotobyname() -- map a protocol name (e.g. 
'tcp') to a number -ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order -htons(), htonl() -- convert 16, 32 bit int from host to network byte order -inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format -inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) -socket.getdefaulttimeout() -- get the default timeout value -socket.setdefaulttimeout() -- set the default timeout value -create_connection() -- connects to an address, with an optional timeout and - optional source address. - - [*] not available on all platforms! - -Special objects: - -SocketType -- type object for socket objects -error -- exception raised for I/O errors -has_ipv6 -- boolean value indicating if IPv6 is supported - -Integer constants: - -AF_INET, AF_UNIX -- socket domains (first argument to socket() call) -SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) - -Many other constants may be defined; these may be used in calls to -the setsockopt() and getsockopt() methods. -""" - -from __future__ import unicode_literals -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -from future.builtins import super - -import _socket -from _socket import * - -import os, sys, io - -try: - import errno -except ImportError: - errno = None -EBADF = getattr(errno, 'EBADF', 9) -EAGAIN = getattr(errno, 'EAGAIN', 11) -EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) - -__all__ = ["getfqdn", "create_connection"] -__all__.extend(os._get_exports_list(_socket)) - - -_realsocket = socket - -# WSA error codes -if sys.platform.lower().startswith("win"): - errorTab = {} - errorTab[10004] = "The operation was interrupted." - errorTab[10009] = "A bad file handle was passed." - errorTab[10013] = "Permission denied." - errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT - errorTab[10022] = "An invalid operation was attempted." - errorTab[10035] = "The socket operation would block" - errorTab[10036] = "A blocking operation is already in progress." - errorTab[10048] = "The network address is in use." - errorTab[10054] = "The connection has been reset." - errorTab[10058] = "The network has been shut down." - errorTab[10060] = "The operation timed out." - errorTab[10061] = "Connection refused." - errorTab[10063] = "The name is too long." - errorTab[10064] = "The host is down." - errorTab[10065] = "The host is unreachable." - __all__.append("errorTab") - - -class socket(_socket.socket): - - """A subclass of _socket.socket adding the makefile() method.""" - - __slots__ = ["__weakref__", "_io_refs", "_closed"] - - def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None): - if fileno is None: - _socket.socket.__init__(self, family, type, proto) - else: - _socket.socket.__init__(self, family, type, proto, fileno) - self._io_refs = 0 - self._closed = False - - def __enter__(self): - return self - - def __exit__(self, *args): - if not self._closed: - self.close() - - def __repr__(self): - """Wrap __repr__() to reveal the real class name.""" - s = _socket.socket.__repr__(self) - if s.startswith(" socket object - - Return a new socket object connected to the same system resource. - """ - fd = dup(self.fileno()) - sock = self.__class__(self.family, self.type, self.proto, fileno=fd) - sock.settimeout(self.gettimeout()) - return sock - - def accept(self): - """accept() -> (socket object, address info) - - Wait for an incoming connection. 
Return a new socket - representing the connection, and the address of the client. - For IP sockets, the address info is a pair (hostaddr, port). - """ - fd, addr = self._accept() - sock = socket(self.family, self.type, self.proto, fileno=fd) - # Issue #7995: if no default timeout is set and the listening - # socket had a (non-zero) timeout, force the new socket in blocking - # mode to override platform-specific socket flags inheritance. - if getdefaulttimeout() is None and self.gettimeout(): - sock.setblocking(True) - return sock, addr - - def makefile(self, mode="r", buffering=None, **_3to2kwargs): - """makefile(...) -> an I/O stream connected to the socket - - The arguments are as for io.open() after the filename, - except the only mode characters supported are 'r', 'w' and 'b'. - The semantics are similar too. (XXX refactor to share code?) - """ - if 'newline' in _3to2kwargs: newline = _3to2kwargs['newline']; del _3to2kwargs['newline'] - else: newline = None - if 'errors' in _3to2kwargs: errors = _3to2kwargs['errors']; del _3to2kwargs['errors'] - else: errors = None - if 'encoding' in _3to2kwargs: encoding = _3to2kwargs['encoding']; del _3to2kwargs['encoding'] - else: encoding = None - for c in mode: - if c not in ("r", "w", "b"): - raise ValueError("invalid mode %r (only r, w, b allowed)") - writing = "w" in mode - reading = "r" in mode or not writing - assert reading or writing - binary = "b" in mode - rawmode = "" - if reading: - rawmode += "r" - if writing: - rawmode += "w" - raw = SocketIO(self, rawmode) - self._io_refs += 1 - if buffering is None: - buffering = -1 - if buffering < 0: - buffering = io.DEFAULT_BUFFER_SIZE - if buffering == 0: - if not binary: - raise ValueError("unbuffered streams must be binary") - return raw - if reading and writing: - buffer = io.BufferedRWPair(raw, raw, buffering) - elif reading: - buffer = io.BufferedReader(raw, buffering) - else: - assert writing - buffer = io.BufferedWriter(raw, buffering) - if binary: - return buffer - text = io.TextIOWrapper(buffer, encoding, errors, newline) - text.mode = mode - return text - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self, _ss=_socket.socket): - # This function should not reference any globals. See issue #808164. - _ss.close(self) - - def close(self): - # This function should not reference any globals. See issue #808164. - self._closed = True - if self._io_refs <= 0: - self._real_close() - - def detach(self): - """detach() -> file descriptor - - Close the socket object without closing the underlying file descriptor. - The object cannot be used after this call, but the file descriptor - can be reused for other purposes. The file descriptor is returned. - """ - self._closed = True - return super().detach() - -def fromfd(fd, family, type, proto=0): - """ fromfd(fd, family, type[, proto]) -> socket object - - Create a socket object from a duplicate of the given file - descriptor. The remaining arguments are the same as for socket(). - """ - nfd = dup(fd) - return socket(family, type, proto, nfd) - -if hasattr(_socket.socket, "share"): - def fromshare(info): - """ fromshare(info) -> socket object - - Create a socket object from a the bytes object returned by - socket.share(pid). 
- """ - return socket(0, 0, 0, info) - -if hasattr(_socket, "socketpair"): - - def socketpair(family=None, type=SOCK_STREAM, proto=0): - """socketpair([family[, type[, proto]]]) -> (socket object, socket object) - - Create a pair of socket objects from the sockets returned by the platform - socketpair() function. - The arguments are the same as for socket() except the default family is - AF_UNIX if defined on the platform; otherwise, the default is AF_INET. - """ - if family is None: - try: - family = AF_UNIX - except NameError: - family = AF_INET - a, b = _socket.socketpair(family, type, proto) - a = socket(family, type, proto, a.detach()) - b = socket(family, type, proto, b.detach()) - return a, b - - -_blocking_errnos = set([EAGAIN, EWOULDBLOCK]) - -class SocketIO(io.RawIOBase): - - """Raw I/O implementation for stream sockets. - - This class supports the makefile() method on sockets. It provides - the raw I/O interface on top of a socket object. - """ - - # One might wonder why not let FileIO do the job instead. There are two - # main reasons why FileIO is not adapted: - # - it wouldn't work under Windows (where you can't used read() and - # write() on a socket handle) - # - it wouldn't work with socket timeouts (FileIO would ignore the - # timeout and consider the socket non-blocking) - - # XXX More docs - - def __init__(self, sock, mode): - if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): - raise ValueError("invalid mode: %r" % mode) - io.RawIOBase.__init__(self) - self._sock = sock - if "b" not in mode: - mode += "b" - self._mode = mode - self._reading = "r" in mode - self._writing = "w" in mode - self._timeout_occurred = False - - def readinto(self, b): - """Read up to len(b) bytes into the writable buffer *b* and return - the number of bytes read. If the socket is non-blocking and no bytes - are available, None is returned. - - If *b* is non-empty, a 0 return value indicates that the connection - was shutdown at the other end. - """ - self._checkClosed() - self._checkReadable() - if self._timeout_occurred: - raise IOError("cannot read from timed out object") - while True: - try: - return self._sock.recv_into(b) - except timeout: - self._timeout_occurred = True - raise - # except InterruptedError: - # continue - except error as e: - if e.args[0] in _blocking_errnos: - return None - raise - - def write(self, b): - """Write the given bytes or bytearray object *b* to the socket - and return the number of bytes written. This can be less than - len(b) if not all data could be written. If the socket is - non-blocking and no bytes could be written None is returned. - """ - self._checkClosed() - self._checkWritable() - try: - return self._sock.send(b) - except error as e: - # XXX what about EINTR? - if e.args[0] in _blocking_errnos: - return None - raise - - def readable(self): - """True if the SocketIO is open for reading. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._reading - - def writable(self): - """True if the SocketIO is open for writing. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return self._writing - - def seekable(self): - """True if the SocketIO is open for seeking. - """ - if self.closed: - raise ValueError("I/O operation on closed socket.") - return super().seekable() - - def fileno(self): - """Return the file descriptor of the underlying socket. 
- """ - self._checkClosed() - return self._sock.fileno() - - @property - def name(self): - if not self.closed: - return self.fileno() - else: - return -1 - - @property - def mode(self): - return self._mode - - def close(self): - """Close the SocketIO object. This doesn't close the underlying - socket, except if all references to it have disappeared. - """ - if self.closed: - return - io.RawIOBase.close(self) - self._sock._decref_socketios() - self._sock = None - - -def getfqdn(name=''): - """Get fully qualified domain name from name. - - An empty argument is interpreted as meaning the local host. - - First the hostname returned by gethostbyaddr() is checked, then - possibly existing aliases. In case no FQDN is available, hostname - from gethostname() is returned. - """ - name = name.strip() - if not name or name == '0.0.0.0': - name = gethostname() - try: - hostname, aliases, ipaddrs = gethostbyaddr(name) - except error: - pass - else: - aliases.insert(0, hostname) - for name in aliases: - if '.' in name: - break - else: - name = hostname - return name - - -# Re-use the same sentinel as in the Python stdlib socket module: -from socket import _GLOBAL_DEFAULT_TIMEOUT -# Was: _GLOBAL_DEFAULT_TIMEOUT = object() - - -def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, - source_address=None): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - An host of '' or port 0 tells the OS to use the default. - """ - - host, port = address - err = None - for res in getaddrinfo(host, port, 0, SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket(af, socktype, proto) - if timeout is not _GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - return sock - - except error as _: - err = _ - if sock is not None: - sock.close() - - if err is not None: - raise err - else: - raise error("getaddrinfo returns an empty list") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socketserver.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socketserver.py deleted file mode 100755 index d1e24a6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/socketserver.py +++ /dev/null @@ -1,747 +0,0 @@ -"""Generic socket server classes. - -This module tries to capture the various aspects of defining a server: - -For socket-based servers: - -- address family: - - AF_INET{,6}: IP (Internet Protocol) sockets (default) - - AF_UNIX: Unix domain sockets - - others, e.g. AF_DECNET are conceivable (see -- socket type: - - SOCK_STREAM (reliable stream, e.g. TCP) - - SOCK_DGRAM (datagrams, e.g. UDP) - -For request-based servers (including socket-based): - -- client address verification before further looking at the request - (This is actually a hook for any processing that needs to look - at the request before anything else, e.g. 
logging) -- how to handle multiple requests: - - synchronous (one request is handled at a time) - - forking (each request is handled by a new process) - - threading (each request is handled by a new thread) - -The classes in this module favor the server type that is simplest to -write: a synchronous TCP/IP server. This is bad class design, but -save some typing. (There's also the issue that a deep class hierarchy -slows down method lookups.) - -There are five classes in an inheritance diagram, four of which represent -synchronous servers of four types: - - +------------+ - | BaseServer | - +------------+ - | - v - +-----------+ +------------------+ - | TCPServer |------->| UnixStreamServer | - +-----------+ +------------------+ - | - v - +-----------+ +--------------------+ - | UDPServer |------->| UnixDatagramServer | - +-----------+ +--------------------+ - -Note that UnixDatagramServer derives from UDPServer, not from -UnixStreamServer -- the only difference between an IP and a Unix -stream server is the address family, which is simply repeated in both -unix server classes. - -Forking and threading versions of each type of server can be created -using the ForkingMixIn and ThreadingMixIn mix-in classes. For -instance, a threading UDP server class is created as follows: - - class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass - -The Mix-in class must come first, since it overrides a method defined -in UDPServer! Setting the various member variables also changes -the behavior of the underlying server mechanism. - -To implement a service, you must derive a class from -BaseRequestHandler and redefine its handle() method. You can then run -various versions of the service by combining one of the server classes -with your request handler class. - -The request handler class must be different for datagram or stream -services. This can be hidden by using the request handler -subclasses StreamRequestHandler or DatagramRequestHandler. - -Of course, you still have to use your head! - -For instance, it makes no sense to use a forking server if the service -contains state in memory that can be modified by requests (since the -modifications in the child process would never reach the initial state -kept in the parent process and passed to each child). In this case, -you can use a threading server, but you will probably have to use -locks to avoid two requests that come in nearly simultaneous to apply -conflicting changes to the server state. - -On the other hand, if you are building e.g. an HTTP server, where all -data is stored externally (e.g. in the file system), a synchronous -class will essentially render the service "deaf" while one request is -being handled -- which may be for a very long time if a client is slow -to read all the data it has requested. Here a threading or forking -server is appropriate. - -In some cases, it may be appropriate to process part of a request -synchronously, but to finish processing in a forked child depending on -the request data. This can be implemented by using a synchronous -server and doing an explicit fork in the request handler class -handle() method. - -Another approach to handling multiple simultaneous requests in an -environment that supports neither threads nor fork (or where these are -too expensive or inappropriate for the service) is to maintain an -explicit table of partially finished requests and to use select() to -decide which request to work on next (or whether to handle a new -incoming request). 
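The mix-in composition shown above is the entire usage pattern: pick a transport class, add a concurrency mix-in, and supply a handler. A runnable sketch against the stdlib socketserver API, which this file backports; the address and handler are illustrative only:

    import socketserver

    class EchoHandler(socketserver.StreamRequestHandler):
        def handle(self):
            # rfile/wfile are prepared by StreamRequestHandler.setup()
            for line in self.rfile:
                self.wfile.write(line)

    # ThreadingTCPServer is ThreadingMixIn + TCPServer, mix-in first:
    server = socketserver.ThreadingTCPServer(("127.0.0.1", 9999), EchoHandler)
    try:
        server.serve_forever()
    finally:
        server.server_close()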
This is particularly important for stream services -where each client can potentially be connected for a long time (if -threads or subprocesses cannot be used). - -Future work: -- Standard classes for Sun RPC (which uses either UDP or TCP) -- Standard mix-in classes to implement various authentication - and encryption schemes -- Standard framework for select-based multiplexing - -XXX Open problems: -- What to do with out-of-band data? - -BaseServer: -- split generic "request" functionality out into BaseServer class. - Copyright (C) 2000 Luke Kenneth Casson Leighton - - example: read entries from a SQL database (requires overriding - get_request() to return a table entry from the database). - entry is processed by a RequestHandlerClass. - -""" - -# Author of the BaseServer patch: Luke Kenneth Casson Leighton - -# XXX Warning! -# There is a test suite for this module, but it cannot be run by the -# standard regression test. -# To run it manually, run Lib/test/test_socketserver.py. - -from __future__ import (absolute_import, print_function) - -__version__ = "0.4" - - -import socket -import select -import sys -import os -import errno -try: - import threading -except ImportError: - import dummy_threading as threading - -__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer", - "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler", - "StreamRequestHandler","DatagramRequestHandler", - "ThreadingMixIn", "ForkingMixIn"] -if hasattr(socket, "AF_UNIX"): - __all__.extend(["UnixStreamServer","UnixDatagramServer", - "ThreadingUnixStreamServer", - "ThreadingUnixDatagramServer"]) - -def _eintr_retry(func, *args): - """restart a system call interrupted by EINTR""" - while True: - try: - return func(*args) - except OSError as e: - if e.errno != errno.EINTR: - raise - -class BaseServer(object): - - """Base class for server classes. - - Methods for the caller: - - - __init__(server_address, RequestHandlerClass) - - serve_forever(poll_interval=0.5) - - shutdown() - - handle_request() # if you do not use serve_forever() - - fileno() -> int # for select() - - Methods that may be overridden: - - - server_bind() - - server_activate() - - get_request() -> request, client_address - - handle_timeout() - - verify_request(request, client_address) - - server_close() - - process_request(request, client_address) - - shutdown_request(request) - - close_request(request) - - service_actions() - - handle_error() - - Methods for derived classes: - - - finish_request(request, client_address) - - Class variables that may be overridden by derived classes or - instances: - - - timeout - - address_family - - socket_type - - allow_reuse_address - - Instance variables: - - - RequestHandlerClass - - socket - - """ - - timeout = None - - def __init__(self, server_address, RequestHandlerClass): - """Constructor. May be extended, do not override.""" - self.server_address = server_address - self.RequestHandlerClass = RequestHandlerClass - self.__is_shut_down = threading.Event() - self.__shutdown_request = False - - def server_activate(self): - """Called by constructor to activate the server. - - May be overridden. - - """ - pass - - def serve_forever(self, poll_interval=0.5): - """Handle one request at a time until shutdown. - - Polls for shutdown every poll_interval seconds. Ignores - self.timeout. If you need to do periodic tasks, do them in - another thread. 
- """ - self.__is_shut_down.clear() - try: - while not self.__shutdown_request: - # XXX: Consider using another file descriptor or - # connecting to the socket to wake this up instead of - # polling. Polling reduces our responsiveness to a - # shutdown request and wastes cpu at all other times. - r, w, e = _eintr_retry(select.select, [self], [], [], - poll_interval) - if self in r: - self._handle_request_noblock() - - self.service_actions() - finally: - self.__shutdown_request = False - self.__is_shut_down.set() - - def shutdown(self): - """Stops the serve_forever loop. - - Blocks until the loop has finished. This must be called while - serve_forever() is running in another thread, or it will - deadlock. - """ - self.__shutdown_request = True - self.__is_shut_down.wait() - - def service_actions(self): - """Called by the serve_forever() loop. - - May be overridden by a subclass / Mixin to implement any code that - needs to be run during the loop. - """ - pass - - # The distinction between handling, getting, processing and - # finishing a request is fairly arbitrary. Remember: - # - # - handle_request() is the top-level call. It calls - # select, get_request(), verify_request() and process_request() - # - get_request() is different for stream or datagram sockets - # - process_request() is the place that may fork a new process - # or create a new thread to finish the request - # - finish_request() instantiates the request handler class; - # this constructor will handle the request all by itself - - def handle_request(self): - """Handle one request, possibly blocking. - - Respects self.timeout. - """ - # Support people who used socket.settimeout() to escape - # handle_request before self.timeout was available. - timeout = self.socket.gettimeout() - if timeout is None: - timeout = self.timeout - elif self.timeout is not None: - timeout = min(timeout, self.timeout) - fd_sets = _eintr_retry(select.select, [self], [], [], timeout) - if not fd_sets[0]: - self.handle_timeout() - return - self._handle_request_noblock() - - def _handle_request_noblock(self): - """Handle one request, without blocking. - - I assume that select.select has returned that the socket is - readable before this function was called, so there should be - no risk of blocking in get_request(). - """ - try: - request, client_address = self.get_request() - except socket.error: - return - if self.verify_request(request, client_address): - try: - self.process_request(request, client_address) - except: - self.handle_error(request, client_address) - self.shutdown_request(request) - - def handle_timeout(self): - """Called if no new request arrives within self.timeout. - - Overridden by ForkingMixIn. - """ - pass - - def verify_request(self, request, client_address): - """Verify the request. May be overridden. - - Return True if we should proceed with this request. - - """ - return True - - def process_request(self, request, client_address): - """Call finish_request. - - Overridden by ForkingMixIn and ThreadingMixIn. - - """ - self.finish_request(request, client_address) - self.shutdown_request(request) - - def server_close(self): - """Called to clean-up the server. - - May be overridden. 
- - """ - pass - - def finish_request(self, request, client_address): - """Finish one request by instantiating RequestHandlerClass.""" - self.RequestHandlerClass(request, client_address, self) - - def shutdown_request(self, request): - """Called to shutdown and close an individual request.""" - self.close_request(request) - - def close_request(self, request): - """Called to clean up an individual request.""" - pass - - def handle_error(self, request, client_address): - """Handle an error gracefully. May be overridden. - - The default is to print a traceback and continue. - - """ - print('-'*40) - print('Exception happened during processing of request from', end=' ') - print(client_address) - import traceback - traceback.print_exc() # XXX But this goes to stderr! - print('-'*40) - - -class TCPServer(BaseServer): - - """Base class for various socket-based server classes. - - Defaults to synchronous IP stream (i.e., TCP). - - Methods for the caller: - - - __init__(server_address, RequestHandlerClass, bind_and_activate=True) - - serve_forever(poll_interval=0.5) - - shutdown() - - handle_request() # if you don't use serve_forever() - - fileno() -> int # for select() - - Methods that may be overridden: - - - server_bind() - - server_activate() - - get_request() -> request, client_address - - handle_timeout() - - verify_request(request, client_address) - - process_request(request, client_address) - - shutdown_request(request) - - close_request(request) - - handle_error() - - Methods for derived classes: - - - finish_request(request, client_address) - - Class variables that may be overridden by derived classes or - instances: - - - timeout - - address_family - - socket_type - - request_queue_size (only for stream sockets) - - allow_reuse_address - - Instance variables: - - - server_address - - RequestHandlerClass - - socket - - """ - - address_family = socket.AF_INET - - socket_type = socket.SOCK_STREAM - - request_queue_size = 5 - - allow_reuse_address = False - - def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True): - """Constructor. May be extended, do not override.""" - BaseServer.__init__(self, server_address, RequestHandlerClass) - self.socket = socket.socket(self.address_family, - self.socket_type) - if bind_and_activate: - self.server_bind() - self.server_activate() - - def server_bind(self): - """Called by constructor to bind the socket. - - May be overridden. - - """ - if self.allow_reuse_address: - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.socket.bind(self.server_address) - self.server_address = self.socket.getsockname() - - def server_activate(self): - """Called by constructor to activate the server. - - May be overridden. - - """ - self.socket.listen(self.request_queue_size) - - def server_close(self): - """Called to clean-up the server. - - May be overridden. - - """ - self.socket.close() - - def fileno(self): - """Return socket file number. - - Interface required by select(). - - """ - return self.socket.fileno() - - def get_request(self): - """Get the request and client address from the socket. - - May be overridden. - - """ - return self.socket.accept() - - def shutdown_request(self, request): - """Called to shutdown and close an individual request.""" - try: - #explicitly shutdown. socket.close() merely releases - #the socket and waits for GC to perform the actual close. 
- request.shutdown(socket.SHUT_WR) - except socket.error: - pass #some platforms may raise ENOTCONN here - self.close_request(request) - - def close_request(self, request): - """Called to clean up an individual request.""" - request.close() - - -class UDPServer(TCPServer): - - """UDP server class.""" - - allow_reuse_address = False - - socket_type = socket.SOCK_DGRAM - - max_packet_size = 8192 - - def get_request(self): - data, client_addr = self.socket.recvfrom(self.max_packet_size) - return (data, self.socket), client_addr - - def server_activate(self): - # No need to call listen() for UDP. - pass - - def shutdown_request(self, request): - # No need to shutdown anything. - self.close_request(request) - - def close_request(self, request): - # No need to close anything. - pass - -class ForkingMixIn(object): - - """Mix-in class to handle each request in a new process.""" - - timeout = 300 - active_children = None - max_children = 40 - - def collect_children(self): - """Internal routine to wait for children that have exited.""" - if self.active_children is None: return - while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. - try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) - - # XXX: This loop runs more system calls than it ought - # to. There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. - for child in self.active_children: - try: - pid, status = os.waitpid(child, os.WNOHANG) - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError as e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) - - def handle_timeout(self): - """Wait for zombies after self.timeout seconds of inactivity. - - May be extended, do not override. - """ - self.collect_children() - - def service_actions(self): - """Collect the zombie child processes regularly in the ForkingMixIn. - - service_actions is called in the BaseServer's serve_forver loop. - """ - self.collect_children() - - def process_request(self, request, client_address): - """Fork a new subprocess to process the request.""" - pid = os.fork() - if pid: - # Parent process - if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) - self.close_request(request) - return - else: - # Child process. - # This must never return, hence os._exit()! - try: - self.finish_request(request, client_address) - self.shutdown_request(request) - os._exit(0) - except: - try: - self.handle_error(request, client_address) - self.shutdown_request(request) - finally: - os._exit(1) - - -class ThreadingMixIn(object): - """Mix-in class to handle each request in a new thread.""" - - # Decides how threads will act upon termination of the - # main process - daemon_threads = False - - def process_request_thread(self, request, client_address): - """Same as in BaseServer but as a thread. - - In addition, exception handling is done here. 
- - """ - try: - self.finish_request(request, client_address) - self.shutdown_request(request) - except: - self.handle_error(request, client_address) - self.shutdown_request(request) - - def process_request(self, request, client_address): - """Start a new thread to process the request.""" - t = threading.Thread(target = self.process_request_thread, - args = (request, client_address)) - t.daemon = self.daemon_threads - t.start() - - -class ForkingUDPServer(ForkingMixIn, UDPServer): pass -class ForkingTCPServer(ForkingMixIn, TCPServer): pass - -class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass -class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass - -if hasattr(socket, 'AF_UNIX'): - - class UnixStreamServer(TCPServer): - address_family = socket.AF_UNIX - - class UnixDatagramServer(UDPServer): - address_family = socket.AF_UNIX - - class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass - - class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass - -class BaseRequestHandler(object): - - """Base class for request handler classes. - - This class is instantiated for each request to be handled. The - constructor sets the instance variables request, client_address - and server, and then calls the handle() method. To implement a - specific service, all you need to do is to derive a class which - defines a handle() method. - - The handle() method can find the request as self.request, the - client address as self.client_address, and the server (in case it - needs access to per-server information) as self.server. Since a - separate instance is created for each request, the handle() method - can define arbitrary other instance variariables. - - """ - - def __init__(self, request, client_address, server): - self.request = request - self.client_address = client_address - self.server = server - self.setup() - try: - self.handle() - finally: - self.finish() - - def setup(self): - pass - - def handle(self): - pass - - def finish(self): - pass - - -# The following two classes make it possible to use the same service -# class for stream or datagram servers. -# Each class sets up these instance variables: -# - rfile: a file object from which receives the request is read -# - wfile: a file object to which the reply is written -# When the handle() method returns, wfile is flushed properly - - -class StreamRequestHandler(BaseRequestHandler): - - """Define self.rfile and self.wfile for stream sockets.""" - - # Default buffer sizes for rfile, wfile. - # We default rfile to buffered because otherwise it could be - # really slow for large data (a getc() call per byte); we make - # wfile unbuffered because (a) often after a write() we want to - # read and we need to flush the line; (b) big writes to unbuffered - # files are typically optimized by stdio even when big reads - # aren't. - rbufsize = -1 - wbufsize = 0 - - # A timeout to apply to the request socket, if not None. - timeout = None - - # Disable nagle algorithm for this socket, if True. - # Use only when wbufsize != 0, to avoid small packets. 
- disable_nagle_algorithm = False - - def setup(self): - self.connection = self.request - if self.timeout is not None: - self.connection.settimeout(self.timeout) - if self.disable_nagle_algorithm: - self.connection.setsockopt(socket.IPPROTO_TCP, - socket.TCP_NODELAY, True) - self.rfile = self.connection.makefile('rb', self.rbufsize) - self.wfile = self.connection.makefile('wb', self.wbufsize) - - def finish(self): - if not self.wfile.closed: - try: - self.wfile.flush() - except socket.error: - # An final socket error may have occurred here, such as - # the local error ECONNABORTED. - pass - self.wfile.close() - self.rfile.close() - - -class DatagramRequestHandler(BaseRequestHandler): - - # XXX Regrettably, I cannot get this working on Linux; - # s.recvfrom() doesn't return a meaningful client address. - - """Define self.rfile and self.wfile for datagram sockets.""" - - def setup(self): - from io import BytesIO - self.packet, self.socket = self.request - self.rfile = BytesIO(self.packet) - self.wfile = BytesIO() - - def finish(self): - self.socket.sendto(self.wfile.getvalue(), self.client_address) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/total_ordering.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/total_ordering.py deleted file mode 100755 index 760f06d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/total_ordering.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -For Python < 2.7.2. total_ordering in versions prior to 2.7.2 is buggy. -See http://bugs.python.org/issue10042 for details. For these versions use -code borrowed from Python 2.7.3. - -From django.utils. -""" - -import sys -if sys.version_info >= (2, 7, 2): - from functools import total_ordering -else: - def total_ordering(cls): - """Class decorator that fills in missing ordering methods""" - convert = { - '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), - ('__le__', lambda self, other: self < other or self == other), - ('__ge__', lambda self, other: not self < other)], - '__le__': [('__ge__', lambda self, other: not self <= other or self == other), - ('__lt__', lambda self, other: self <= other and not self == other), - ('__gt__', lambda self, other: not self <= other)], - '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), - ('__ge__', lambda self, other: self > other or self == other), - ('__le__', lambda self, other: not self > other)], - '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), - ('__gt__', lambda self, other: self >= other and not self == other), - ('__lt__', lambda self, other: not self >= other)] - } - roots = set(dir(cls)) & set(convert) - if not roots: - raise ValueError('must define at least one ordering operation: < > <= >=') - root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ - for opname, opfunc in convert[root]: - if opname not in roots: - opfunc.__name__ = opname - opfunc.__doc__ = getattr(int, opname).__doc__ - setattr(cls, opname, opfunc) - return cls diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/error.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/error.py deleted file mode 100755 index a473e44..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/error.py +++ /dev/null @@ -1,75 +0,0 @@ -"""Exception classes raised by urllib. 
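The total_ordering backport deleted just above fills in whichever rich-comparison methods a class omits, starting from one of __lt__/__le__/__gt__/__ge__ plus __eq__. A sketch using the stdlib functools version that the backport replicates (the Build class is invented for illustration):

    from functools import total_ordering

    @total_ordering
    class Build(object):
        def __init__(self, major, minor):
            self.key = (major, minor)
        def __eq__(self, other):
            return self.key == other.key
        def __lt__(self, other):
            return self.key < other.key

    # __le__, __gt__ and __ge__ are derived from __lt__ and __eq__:
    assert Build(4, 1) <= Build(4, 3)
    assert Build(4, 3) > Build(4, 1)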
- -The base exception class is URLError, which inherits from IOError. It -doesn't define any behavior of its own, but is the base class for all -exceptions defined in this package. - -HTTPError is an exception class that is also a valid HTTP response -instance. It behaves this way because HTTP protocol errors are valid -responses, with a status code, headers, and a body. In some contexts, -an application may want to handle an exception like a regular -response. -""" -from __future__ import absolute_import, division, unicode_literals -from future import standard_library - -from future.backports.urllib import response as urllib_response - - -__all__ = ['URLError', 'HTTPError', 'ContentTooShortError'] - - -# do these error classes make sense? -# make sure all of the IOError stuff is overridden. we just want to be -# subtypes. - -class URLError(IOError): - # URLError is a sub-type of IOError, but it doesn't share any of - # the implementation. need to override __init__ and __str__. - # It sets self.args for compatibility with other EnvironmentError - # subclasses, but args doesn't have the typical format with errno in - # slot 0 and strerror in slot 1. This may be better than nothing. - def __init__(self, reason, filename=None): - self.args = reason, - self.reason = reason - if filename is not None: - self.filename = filename - - def __str__(self): - return '' % self.reason - -class HTTPError(URLError, urllib_response.addinfourl): - """Raised when HTTP error occurs, but also acts like non-error return""" - __super_init = urllib_response.addinfourl.__init__ - - def __init__(self, url, code, msg, hdrs, fp): - self.code = code - self.msg = msg - self.hdrs = hdrs - self.fp = fp - self.filename = url - # The addinfourl classes depend on fp being a valid file - # object. In some cases, the HTTPError may not have a valid - # file object. If this happens, the simplest workaround is to - # not initialize the base classes. - if fp is not None: - self.__super_init(fp, hdrs, url, code) - - def __str__(self): - return 'HTTP Error %s: %s' % (self.code, self.msg) - - # since URLError specifies a .reason attribute, HTTPError should also - # provide this attribute. See issue13211 for discussion. - @property - def reason(self): - return self.msg - - def info(self): - return self.hdrs - - -# exception raised when downloaded size does not match content-length -class ContentTooShortError(URLError): - def __init__(self, message, content): - URLError.__init__(self, message) - self.content = content diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/parse.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/parse.py deleted file mode 100755 index 04e52d4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/parse.py +++ /dev/null @@ -1,991 +0,0 @@ -""" -Ported using Python-Future from the Python 3.3 standard library. - -Parse (absolute and relative) URLs. - -urlparse module is based upon the following RFC specifications. - -RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding -and L. Masinter, January 2005. - -RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter -and L.Masinter, December 1999. - -RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. -Berners-Lee, R. Fielding, and L. Masinter, August 1998. - -RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. 
- -RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June -1995. - -RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. -McCahill, December 1994 - -RFC 3986 is considered the current standard and any future changes to -urlparse module should conform with it. The urlparse module is -currently not entirely compliant with this RFC due to defacto -scenarios for parsing, and for backward compatibility purposes, some -parsing quirks from older RFCs are retained. The testcases in -test_urlparse.py provides a good indicator of parsing behavior. -""" -from __future__ import absolute_import, division, unicode_literals -from future.builtins import bytes, chr, dict, int, range, str -from future.utils import raise_with_traceback - -import re -import sys -import collections - -__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", - "urlsplit", "urlunsplit", "urlencode", "parse_qs", - "parse_qsl", "quote", "quote_plus", "quote_from_bytes", - "unquote", "unquote_plus", "unquote_to_bytes"] - -# A classification of schemes ('' means apply by default) -uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap', - 'wais', 'file', 'https', 'shttp', 'mms', - 'prospero', 'rtsp', 'rtspu', '', 'sftp', - 'svn', 'svn+ssh'] -uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', - 'imap', 'wais', 'file', 'mms', 'https', 'shttp', - 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '', - 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh'] -uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap', - 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', - 'mms', '', 'sftp', 'tel'] - -# These are not actually used anymore, but should stay for backwards -# compatibility. (They are undocumented, but have a public-looking name.) -non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', - 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] -uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms', - 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', ''] -uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', - 'nntp', 'wais', 'https', 'shttp', 'snews', - 'file', 'prospero', ''] - -# Characters valid in scheme names -scheme_chars = ('abcdefghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - '0123456789' - '+-.') - -# XXX: Consider replacing with functools.lru_cache -MAX_CACHE_SIZE = 20 -_parse_cache = {} - -def clear_cache(): - """Clear the parse cache and the quoters cache.""" - _parse_cache.clear() - _safe_quoters.clear() - - -# Helpers for bytes handling -# For 3.2, we deliberately require applications that -# handle improperly quoted URLs to do their own -# decoding and encoding. 
If valid use cases are -# presented, we may relax this by using latin-1 -# decoding internally for 3.3 -_implicit_encoding = 'ascii' -_implicit_errors = 'strict' - -def _noop(obj): - return obj - -def _encode_result(obj, encoding=_implicit_encoding, - errors=_implicit_errors): - return obj.encode(encoding, errors) - -def _decode_args(args, encoding=_implicit_encoding, - errors=_implicit_errors): - return tuple(x.decode(encoding, errors) if x else '' for x in args) - -def _coerce_args(*args): - # Invokes decode if necessary to create str args - # and returns the coerced inputs along with - # an appropriate result coercion function - # - noop for str inputs - # - encoding function otherwise - str_input = isinstance(args[0], str) - for arg in args[1:]: - # We special-case the empty string to support the - # "scheme=''" default argument to some functions - if arg and isinstance(arg, str) != str_input: - raise TypeError("Cannot mix str and non-str arguments") - if str_input: - return args + (_noop,) - return _decode_args(args) + (_encode_result,) - -# Result objects are more helpful than simple tuples -class _ResultMixinStr(object): - """Standard approach to encoding parsed results from str to bytes""" - __slots__ = () - - def encode(self, encoding='ascii', errors='strict'): - return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) - - -class _ResultMixinBytes(object): - """Standard approach to decoding parsed results from bytes to str""" - __slots__ = () - - def decode(self, encoding='ascii', errors='strict'): - return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) - - -class _NetlocResultMixinBase(object): - """Shared methods for the parsed result objects containing a netloc element""" - __slots__ = () - - @property - def username(self): - return self._userinfo[0] - - @property - def password(self): - return self._userinfo[1] - - @property - def hostname(self): - hostname = self._hostinfo[0] - if not hostname: - hostname = None - elif hostname is not None: - hostname = hostname.lower() - return hostname - - @property - def port(self): - port = self._hostinfo[1] - if port is not None: - port = int(port, 10) - # Return None on an illegal port - if not ( 0 <= port <= 65535): - return None - return port - - -class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): - __slots__ = () - - @property - def _userinfo(self): - netloc = self.netloc - userinfo, have_info, hostinfo = netloc.rpartition('@') - if have_info: - username, have_password, password = userinfo.partition(':') - if not have_password: - password = None - else: - username = password = None - return username, password - - @property - def _hostinfo(self): - netloc = self.netloc - _, _, hostinfo = netloc.rpartition('@') - _, have_open_br, bracketed = hostinfo.partition('[') - if have_open_br: - hostname, _, port = bracketed.partition(']') - _, have_port, port = port.partition(':') - else: - hostname, have_port, port = hostinfo.partition(':') - if not have_port: - port = None - return hostname, port - - -class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): - __slots__ = () - - @property - def _userinfo(self): - netloc = self.netloc - userinfo, have_info, hostinfo = netloc.rpartition(b'@') - if have_info: - username, have_password, password = userinfo.partition(b':') - if not have_password: - password = None - else: - username = password = None - return username, password - - @property - def _hostinfo(self): - netloc = self.netloc - _, _, hostinfo = 
netloc.rpartition(b'@') - _, have_open_br, bracketed = hostinfo.partition(b'[') - if have_open_br: - hostname, _, port = bracketed.partition(b']') - _, have_port, port = port.partition(b':') - else: - hostname, have_port, port = hostinfo.partition(b':') - if not have_port: - port = None - return hostname, port - - -from collections import namedtuple - -_DefragResultBase = namedtuple('DefragResult', 'url fragment') -_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment') -_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment') - -# For backwards compatibility, alias _NetlocResultMixinStr -# ResultBase is no longer part of the documented API, but it is -# retained since deprecating it isn't worth the hassle -ResultBase = _NetlocResultMixinStr - -# Structured result objects for string data -class DefragResult(_DefragResultBase, _ResultMixinStr): - __slots__ = () - def geturl(self): - if self.fragment: - return self.url + '#' + self.fragment - else: - return self.url - -class SplitResult(_SplitResultBase, _NetlocResultMixinStr): - __slots__ = () - def geturl(self): - return urlunsplit(self) - -class ParseResult(_ParseResultBase, _NetlocResultMixinStr): - __slots__ = () - def geturl(self): - return urlunparse(self) - -# Structured result objects for bytes data -class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): - __slots__ = () - def geturl(self): - if self.fragment: - return self.url + b'#' + self.fragment - else: - return self.url - -class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): - __slots__ = () - def geturl(self): - return urlunsplit(self) - -class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): - __slots__ = () - def geturl(self): - return urlunparse(self) - -# Set up the encode/decode result pairs -def _fix_result_transcoding(): - _result_pairs = ( - (DefragResult, DefragResultBytes), - (SplitResult, SplitResultBytes), - (ParseResult, ParseResultBytes), - ) - for _decoded, _encoded in _result_pairs: - _decoded._encoded_counterpart = _encoded - _encoded._decoded_counterpart = _decoded - -_fix_result_transcoding() -del _fix_result_transcoding - -def urlparse(url, scheme='', allow_fragments=True): - """Parse a URL into 6 components: - :///;?# - Return a 6-tuple: (scheme, netloc, path, params, query, fragment). - Note that we don't break the components up in smaller bits - (e.g. 
netloc is a single string) and we don't expand % escapes.""" - url, scheme, _coerce_result = _coerce_args(url, scheme) - splitresult = urlsplit(url, scheme, allow_fragments) - scheme, netloc, url, query, fragment = splitresult - if scheme in uses_params and ';' in url: - url, params = _splitparams(url) - else: - params = '' - result = ParseResult(scheme, netloc, url, params, query, fragment) - return _coerce_result(result) - -def _splitparams(url): - if '/' in url: - i = url.find(';', url.rfind('/')) - if i < 0: - return url, '' - else: - i = url.find(';') - return url[:i], url[i+1:] - -def _splitnetloc(url, start=0): - delim = len(url) # position of end of domain part of url, default is end - for c in '/?#': # look for delimiters; the order is NOT important - wdelim = url.find(c, start) # find first of this delim - if wdelim >= 0: # if found - delim = min(delim, wdelim) # use earliest delim position - return url[start:delim], url[delim:] # return (domain, rest) - -def urlsplit(url, scheme='', allow_fragments=True): - """Parse a URL into 5 components: - :///?# - Return a 5-tuple: (scheme, netloc, path, query, fragment). - Note that we don't break the components up in smaller bits - (e.g. netloc is a single string) and we don't expand % escapes.""" - url, scheme, _coerce_result = _coerce_args(url, scheme) - allow_fragments = bool(allow_fragments) - key = url, scheme, allow_fragments, type(url), type(scheme) - cached = _parse_cache.get(key, None) - if cached: - return _coerce_result(cached) - if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth - clear_cache() - netloc = query = fragment = '' - i = url.find(':') - if i > 0: - if url[:i] == 'http': # optimize the common case - scheme = url[:i].lower() - url = url[i+1:] - if url[:2] == '//': - netloc, url = _splitnetloc(url, 2) - if (('[' in netloc and ']' not in netloc) or - (']' in netloc and '[' not in netloc)): - raise ValueError("Invalid IPv6 URL") - if allow_fragments and '#' in url: - url, fragment = url.split('#', 1) - if '?' in url: - url, query = url.split('?', 1) - v = SplitResult(scheme, netloc, url, query, fragment) - _parse_cache[key] = v - return _coerce_result(v) - for c in url[:i]: - if c not in scheme_chars: - break - else: - # make sure "url" is not actually a port number (in which case - # "scheme" is really part of the path) - rest = url[i+1:] - if not rest or any(c not in '0123456789' for c in rest): - # not a port number - scheme, url = url[:i].lower(), rest - - if url[:2] == '//': - netloc, url = _splitnetloc(url, 2) - if (('[' in netloc and ']' not in netloc) or - (']' in netloc and '[' not in netloc)): - raise ValueError("Invalid IPv6 URL") - if allow_fragments and '#' in url: - url, fragment = url.split('#', 1) - if '?' in url: - url, query = url.split('?', 1) - v = SplitResult(scheme, netloc, url, query, fragment) - _parse_cache[key] = v - return _coerce_result(v) - -def urlunparse(components): - """Put a parsed URL back together again. This may result in a - slightly different, but equivalent URL, if the URL that was parsed - originally had redundant delimiters, e.g. a ? with an empty query - (the draft states that these are equivalent).""" - scheme, netloc, url, params, query, fragment, _coerce_result = ( - _coerce_args(*components)) - if params: - url = "%s;%s" % (url, params) - return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) - -def urlunsplit(components): - """Combine the elements of a tuple as returned by urlsplit() into a - complete URL as a string. 
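A short illustration of the split/unsplit round-trip and of relative resolution with urljoin, against the stdlib urllib.parse that this file tracks:

    from urllib.parse import urljoin, urlsplit, urlunsplit

    parts = urlsplit("https://example.com/a/b?x=1#f")
    assert urlunsplit(parts) == "https://example.com/a/b?x=1#f"

    # Relative references resolve against the base per RFC 3986:
    assert urljoin("https://example.com/api/v1/", "../v2/issues") == (
        "https://example.com/api/v2/issues")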
The data argument can be any five-item iterable. - This may result in a slightly different, but equivalent URL, if the URL that - was parsed originally had unnecessary delimiters (for example, a ? with an - empty query; the RFC states that these are equivalent).""" - scheme, netloc, url, query, fragment, _coerce_result = ( - _coerce_args(*components)) - if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): - if url and url[:1] != '/': url = '/' + url - url = '//' + (netloc or '') + url - if scheme: - url = scheme + ':' + url - if query: - url = url + '?' + query - if fragment: - url = url + '#' + fragment - return _coerce_result(url) - -def urljoin(base, url, allow_fragments=True): - """Join a base URL and a possibly relative URL to form an absolute - interpretation of the latter.""" - if not base: - return url - if not url: - return base - base, url, _coerce_result = _coerce_args(base, url) - bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ - urlparse(base, '', allow_fragments) - scheme, netloc, path, params, query, fragment = \ - urlparse(url, bscheme, allow_fragments) - if scheme != bscheme or scheme not in uses_relative: - return _coerce_result(url) - if scheme in uses_netloc: - if netloc: - return _coerce_result(urlunparse((scheme, netloc, path, - params, query, fragment))) - netloc = bnetloc - if path[:1] == '/': - return _coerce_result(urlunparse((scheme, netloc, path, - params, query, fragment))) - if not path and not params: - path = bpath - params = bparams - if not query: - query = bquery - return _coerce_result(urlunparse((scheme, netloc, path, - params, query, fragment))) - segments = bpath.split('/')[:-1] + path.split('/') - # XXX The stuff below is bogus in various ways... - if segments[-1] == '.': - segments[-1] = '' - while '.' in segments: - segments.remove('.') - while 1: - i = 1 - n = len(segments) - 1 - while i < n: - if (segments[i] == '..' - and segments[i-1] not in ('', '..')): - del segments[i-1:i+1] - break - i = i+1 - else: - break - if segments == ['', '..']: - segments[-1] = '' - elif len(segments) >= 2 and segments[-1] == '..': - segments[-2:] = [''] - return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments), - params, query, fragment))) - -def urldefrag(url): - """Removes any existing fragment from URL. - - Returns a tuple of the defragmented URL and the fragment. If - the URL contained no fragments, the second element is the - empty string. - """ - url, _coerce_result = _coerce_args(url) - if '#' in url: - s, n, p, a, q, frag = urlparse(url) - defrag = urlunparse((s, n, p, a, q, '')) - else: - frag = '' - defrag = url - return _coerce_result(DefragResult(defrag, frag)) - -_hexdig = '0123456789ABCDEFabcdef' -_hextobyte = dict(((a + b).encode(), bytes([int(a + b, 16)])) - for a in _hexdig for b in _hexdig) - -def unquote_to_bytes(string): - """unquote_to_bytes('abc%20def') -> b'abc def'.""" - # Note: strings are encoded as UTF-8. This is only an issue if it contains - # unescaped non-ASCII characters, which URIs should not. - if not string: - # Is it a string-like object? 
- string.split - return bytes(b'') - if isinstance(string, str): - string = string.encode('utf-8') - ### For Python-Future: - # It is already a byte-string object, but force it to be newbytes here on - # Py2: - string = bytes(string) - ### - bits = string.split(b'%') - if len(bits) == 1: - return string - res = [bits[0]] - append = res.append - for item in bits[1:]: - try: - append(_hextobyte[item[:2]]) - append(item[2:]) - except KeyError: - append(b'%') - append(item) - return bytes(b'').join(res) - -_asciire = re.compile('([\x00-\x7f]+)') - -def unquote(string, encoding='utf-8', errors='replace'): - """Replace %xx escapes by their single-character equivalent. The optional - encoding and errors parameters specify how to decode percent-encoded - sequences into Unicode characters, as accepted by the bytes.decode() - method. - By default, percent-encoded sequences are decoded with UTF-8, and invalid - sequences are replaced by a placeholder character. - - unquote('abc%20def') -> 'abc def'. - """ - if '%' not in string: - string.split - return string - if encoding is None: - encoding = 'utf-8' - if errors is None: - errors = 'replace' - bits = _asciire.split(string) - res = [bits[0]] - append = res.append - for i in range(1, len(bits), 2): - append(unquote_to_bytes(bits[i]).decode(encoding, errors)) - append(bits[i + 1]) - return ''.join(res) - -def parse_qs(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace'): - """Parse a query given as a string argument. - - Arguments: - - qs: percent-encoded query string to be parsed - - keep_blank_values: flag indicating whether blank values in - percent-encoded queries should be treated as blank strings. - A true value indicates that blanks should be retained as - blank strings. The default false value indicates that - blank values are to be ignored and treated as if they were - not included. - - strict_parsing: flag indicating what to do with parsing errors. - If false (the default), errors are silently ignored. - If true, errors raise a ValueError exception. - - encoding and errors: specify how to decode percent-encoded sequences - into Unicode characters, as accepted by the bytes.decode() method. - """ - parsed_result = {} - pairs = parse_qsl(qs, keep_blank_values, strict_parsing, - encoding=encoding, errors=errors) - for name, value in pairs: - if name in parsed_result: - parsed_result[name].append(value) - else: - parsed_result[name] = [value] - return parsed_result - -def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, - encoding='utf-8', errors='replace'): - """Parse a query given as a string argument. - - Arguments: - - qs: percent-encoded query string to be parsed - - keep_blank_values: flag indicating whether blank values in - percent-encoded queries should be treated as blank strings. A - true value indicates that blanks should be retained as blank - strings. The default false value indicates that blank values - are to be ignored and treated as if they were not included. - - strict_parsing: flag indicating what to do with parsing errors. If - false (the default), errors are silently ignored. If true, - errors raise a ValueError exception. - - encoding and errors: specify how to decode percent-encoded sequences - into Unicode characters, as accepted by the bytes.decode() method. - - Returns a list, as G-d intended. 
- """ - qs, _coerce_result = _coerce_args(qs) - pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] - r = [] - for name_value in pairs: - if not name_value and not strict_parsing: - continue - nv = name_value.split('=', 1) - if len(nv) != 2: - if strict_parsing: - raise ValueError("bad query field: %r" % (name_value,)) - # Handle case of a control-name with no equal sign - if keep_blank_values: - nv.append('') - else: - continue - if len(nv[1]) or keep_blank_values: - name = nv[0].replace('+', ' ') - name = unquote(name, encoding=encoding, errors=errors) - name = _coerce_result(name) - value = nv[1].replace('+', ' ') - value = unquote(value, encoding=encoding, errors=errors) - value = _coerce_result(value) - r.append((name, value)) - return r - -def unquote_plus(string, encoding='utf-8', errors='replace'): - """Like unquote(), but also replace plus signs by spaces, as required for - unquoting HTML form values. - - unquote_plus('%7e/abc+def') -> '~/abc def' - """ - string = string.replace('+', ' ') - return unquote(string, encoding, errors) - -_ALWAYS_SAFE = frozenset(bytes(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - b'abcdefghijklmnopqrstuvwxyz' - b'0123456789' - b'_.-')) -_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE) -_safe_quoters = {} - -class Quoter(collections.defaultdict): - """A mapping from bytes (in range(0,256)) to strings. - - String values are percent-encoded byte values, unless the key < 128, and - in the "safe" set (either the specified safe set, or default set). - """ - # Keeps a cache internally, using defaultdict, for efficiency (lookups - # of cached keys don't call Python code at all). - def __init__(self, safe): - """safe: bytes object.""" - self.safe = _ALWAYS_SAFE.union(bytes(safe)) - - def __repr__(self): - # Without this, will just display as a defaultdict - return "" % dict(self) - - def __missing__(self, b): - # Handle a cache miss. Store quoted string in cache and return. - res = chr(b) if b in self.safe else '%{0:02X}'.format(b) - self[b] = res - return res - -def quote(string, safe='/', encoding=None, errors=None): - """quote('abc def') -> 'abc%20def' - - Each part of a URL, e.g. the path info, the query, etc., has a - different set of reserved characters that must be quoted. - - RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists - the following reserved characters. - - reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | - "$" | "," - - Each of these characters is reserved in some component of a URL, - but not necessarily in all of them. - - By default, the quote function is intended for quoting the path - section of a URL. Thus, it will not encode '/'. This character - is reserved, but in typical usage the quote function is being - called on a path where the existing slash characters are used as - reserved characters. - - string and safe may be either str or bytes objects. encoding must - not be specified if string is a str. - - The optional encoding and errors parameters specify how to deal with - non-ASCII characters, as accepted by the str.encode method. - By default, encoding='utf-8' (characters are encoded with UTF-8), and - errors='strict' (unsupported characters raise a UnicodeEncodeError). 
- """ - if isinstance(string, str): - if not string: - return string - if encoding is None: - encoding = 'utf-8' - if errors is None: - errors = 'strict' - string = string.encode(encoding, errors) - else: - if encoding is not None: - raise TypeError("quote() doesn't support 'encoding' for bytes") - if errors is not None: - raise TypeError("quote() doesn't support 'errors' for bytes") - return quote_from_bytes(string, safe) - -def quote_plus(string, safe='', encoding=None, errors=None): - """Like quote(), but also replace ' ' with '+', as required for quoting - HTML form values. Plus signs in the original string are escaped unless - they are included in safe. It also does not have safe default to '/'. - """ - # Check if ' ' in string, where string may either be a str or bytes. If - # there are no spaces, the regular quote will produce the right answer. - if ((isinstance(string, str) and ' ' not in string) or - (isinstance(string, bytes) and b' ' not in string)): - return quote(string, safe, encoding, errors) - if isinstance(safe, str): - space = str(' ') - else: - space = bytes(b' ') - string = quote(string, safe + space, encoding, errors) - return string.replace(' ', '+') - -def quote_from_bytes(bs, safe='/'): - """Like quote(), but accepts a bytes object rather than a str, and does - not perform string-to-bytes encoding. It always returns an ASCII string. - quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' - """ - if not isinstance(bs, (bytes, bytearray)): - raise TypeError("quote_from_bytes() expected bytes") - if not bs: - return str('') - ### For Python-Future: - bs = bytes(bs) - ### - if isinstance(safe, str): - # Normalize 'safe' by converting to bytes and removing non-ASCII chars - safe = str(safe).encode('ascii', 'ignore') - else: - ### For Python-Future: - safe = bytes(safe) - ### - safe = bytes([c for c in safe if c < 128]) - if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): - return bs.decode() - try: - quoter = _safe_quoters[safe] - except KeyError: - _safe_quoters[safe] = quoter = Quoter(safe).__getitem__ - return str('').join([quoter(char) for char in bs]) - -def urlencode(query, doseq=False, safe='', encoding=None, errors=None): - """Encode a sequence of two-element tuples or dictionary into a URL query string. - - If any values in the query arg are sequences and doseq is true, each - sequence element is converted to a separate parameter. - - If the query arg is a sequence of two-element tuples, the order of the - parameters in the output will match the order of parameters in the - input. - - The query arg may be either a string or a bytes type. When query arg is a - string, the safe, encoding and error parameters are sent the quote_plus for - encoding. - """ - - if hasattr(query, "items"): - query = query.items() - else: - # It's a bother at times that strings and string-like objects are - # sequences. - try: - # non-sequence items should not work with len() - # non-empty strings will fail this - if len(query) and not isinstance(query[0], tuple): - raise TypeError - # Zero-length sequences of all types will get here and succeed, - # but that's a minor nit. 
Since the original implementation - # allowed empty dicts that type of behavior probably should be - # preserved for consistency - except TypeError: - ty, va, tb = sys.exc_info() - raise_with_traceback(TypeError("not a valid non-string sequence " - "or mapping object"), tb) - - l = [] - if not doseq: - for k, v in query: - if isinstance(k, bytes): - k = quote_plus(k, safe) - else: - k = quote_plus(str(k), safe, encoding, errors) - - if isinstance(v, bytes): - v = quote_plus(v, safe) - else: - v = quote_plus(str(v), safe, encoding, errors) - l.append(k + '=' + v) - else: - for k, v in query: - if isinstance(k, bytes): - k = quote_plus(k, safe) - else: - k = quote_plus(str(k), safe, encoding, errors) - - if isinstance(v, bytes): - v = quote_plus(v, safe) - l.append(k + '=' + v) - elif isinstance(v, str): - v = quote_plus(v, safe, encoding, errors) - l.append(k + '=' + v) - else: - try: - # Is this a sufficient test for sequence-ness? - x = len(v) - except TypeError: - # not a sequence - v = quote_plus(str(v), safe, encoding, errors) - l.append(k + '=' + v) - else: - # loop over the sequence - for elt in v: - if isinstance(elt, bytes): - elt = quote_plus(elt, safe) - else: - elt = quote_plus(str(elt), safe, encoding, errors) - l.append(k + '=' + elt) - return str('&').join(l) - -# Utilities to parse URLs (most of these return None for missing parts): -# unwrap('') --> 'type://host/path' -# splittype('type:opaquestring') --> 'type', 'opaquestring' -# splithost('//host[:port]/path') --> 'host[:port]', '/path' -# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]' -# splitpasswd('user:passwd') -> 'user', 'passwd' -# splitport('host:port') --> 'host', 'port' -# splitquery('/path?query') --> '/path', 'query' -# splittag('/path#tag') --> '/path', 'tag' -# splitattr('/path;attr1=value1;attr2=value2;...') -> -# '/path', ['attr1=value1', 'attr2=value2', ...] -# splitvalue('attr=value') --> 'attr', 'value' -# urllib.parse.unquote('abc%20def') -> 'abc def' -# quote('abc def') -> 'abc%20def') - -def to_bytes(url): - """to_bytes(u"URL") --> 'URL'.""" - # Most URL schemes require ASCII. If that changes, the conversion - # can be relaxed. 
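urlencode() and parse_qsl() are near-inverses; with doseq=True a sequence value fans out into one parameter per element. A sketch with invented parameter names:

    from urllib.parse import parse_qsl, urlencode

    q = urlencode([("device", "switch-1"), ("tag", ["a", "b"])], doseq=True)
    assert q == "device=switch-1&tag=a&tag=b"
    assert parse_qsl(q) == [("device", "switch-1"), ("tag", "a"), ("tag", "b")]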
- # XXX get rid of to_bytes() - if isinstance(url, str): - try: - url = url.encode("ASCII").decode() - except UnicodeError: - raise UnicodeError("URL " + repr(url) + - " contains non-ASCII characters") - return url - -def unwrap(url): - """unwrap('') --> 'type://host/path'.""" - url = str(url).strip() - if url[:1] == '<' and url[-1:] == '>': - url = url[1:-1].strip() - if url[:4] == 'URL:': url = url[4:].strip() - return url - -_typeprog = None -def splittype(url): - """splittype('type:opaquestring') --> 'type', 'opaquestring'.""" - global _typeprog - if _typeprog is None: - import re - _typeprog = re.compile('^([^/:]+):') - - match = _typeprog.match(url) - if match: - scheme = match.group(1) - return scheme.lower(), url[len(scheme) + 1:] - return None, url - -_hostprog = None -def splithost(url): - """splithost('//host[:port]/path') --> 'host[:port]', '/path'.""" - global _hostprog - if _hostprog is None: - import re - _hostprog = re.compile('^//([^/?]*)(.*)$') - - match = _hostprog.match(url) - if match: - host_port = match.group(1) - path = match.group(2) - if path and not path.startswith('/'): - path = '/' + path - return host_port, path - return None, url - -_userprog = None -def splituser(host): - """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - global _userprog - if _userprog is None: - import re - _userprog = re.compile('^(.*)@(.*)$') - - match = _userprog.match(host) - if match: return match.group(1, 2) - return None, host - -_passwdprog = None -def splitpasswd(user): - """splitpasswd('user:passwd') -> 'user', 'passwd'.""" - global _passwdprog - if _passwdprog is None: - import re - _passwdprog = re.compile('^([^:]*):(.*)$',re.S) - - match = _passwdprog.match(user) - if match: return match.group(1, 2) - return user, None - -# splittag('/path#tag') --> '/path', 'tag' -_portprog = None -def splitport(host): - """splitport('host:port') --> 'host', 'port'.""" - global _portprog - if _portprog is None: - import re - _portprog = re.compile('^(.*):([0-9]+)$') - - match = _portprog.match(host) - if match: return match.group(1, 2) - return host, None - -_nportprog = None -def splitnport(host, defport=-1): - """Split host and port, returning numeric port. - Return given default port if no ':' found; defaults to -1. - Return numerical port if a valid number are found after ':'. 
- Return None if ':' but not a valid number.""" - global _nportprog - if _nportprog is None: - import re - _nportprog = re.compile('^(.*):(.*)$') - - match = _nportprog.match(host) - if match: - host, port = match.group(1, 2) - try: - if not port: raise ValueError("no digits") - nport = int(port) - except ValueError: - nport = None - return host, nport - return host, defport - -_queryprog = None -def splitquery(url): - """splitquery('/path?query') --> '/path', 'query'.""" - global _queryprog - if _queryprog is None: - import re - _queryprog = re.compile('^(.*)\?([^?]*)$') - - match = _queryprog.match(url) - if match: return match.group(1, 2) - return url, None - -_tagprog = None -def splittag(url): - """splittag('/path#tag') --> '/path', 'tag'.""" - global _tagprog - if _tagprog is None: - import re - _tagprog = re.compile('^(.*)#([^#]*)$') - - match = _tagprog.match(url) - if match: return match.group(1, 2) - return url, None - -def splitattr(url): - """splitattr('/path;attr1=value1;attr2=value2;...') -> - '/path', ['attr1=value1', 'attr2=value2', ...].""" - words = url.split(';') - return words[0], words[1:] - -_valueprog = None -def splitvalue(attr): - """splitvalue('attr=value') --> 'attr', 'value'.""" - global _valueprog - if _valueprog is None: - import re - _valueprog = re.compile('^([^=]*)=(.*)$') - - match = _valueprog.match(attr) - if match: return match.group(1, 2) - return attr, None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/request.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/request.py deleted file mode 100755 index baee540..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/request.py +++ /dev/null @@ -1,2647 +0,0 @@ -""" -Ported using Python-Future from the Python 3.3 standard library. - -An extensible library for opening URLs using a variety of protocols - -The simplest way to use this module is to call the urlopen function, -which accepts a string containing a URL or a Request object (described -below). It opens the URL and returns the results as file-like -object; the returned object has some extra methods described below. - -The OpenerDirector manages a collection of Handler objects that do -all the actual work. Each Handler implements a particular protocol or -option. The OpenerDirector is a composite object that invokes the -Handlers needed to open the requested URL. For example, the -HTTPHandler performs HTTP GET and POST requests and deals with -non-error returns. The HTTPRedirectHandler automatically deals with -HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler -deals with digest authentication. - -urlopen(url, data=None) -- Basic usage is the same as original -urllib. pass the url and optionally data to post to an HTTP URL, and -get a file-like object back. One difference is that you can also pass -a Request instance instead of URL. Raises a URLError (subclass of -IOError); for HTTP errors, raises an HTTPError, which can also be -treated as a valid response. - -build_opener -- Function that creates a new OpenerDirector instance. -Will install the default handlers. Accepts one or more Handlers as -arguments, either instances or Handler classes that it will -instantiate. If one of the argument is a subclass of the default -handler, the argument will be installed instead of the default. - -install_opener -- Installs a new opener as the default opener. 
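The legacy split*() helpers deleted above predate the structured result objects; modern callers get the same fields from urlsplit(). For comparison:

    from urllib.parse import urlsplit

    parts = urlsplit("//host.example:8080/path?q=1")
    assert parts.hostname == "host.example"   # splituser()/splitport() territory
    assert parts.port == 8080                 # splitnport() also returned an int
    assert parts.path == "/path"
    assert parts.query == "q=1"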
- -objects of interest: - -OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages -the Handler classes, while dealing with requests and responses. - -Request -- An object that encapsulates the state of a request. The -state can be as simple as the URL. It can also include extra HTTP -headers, e.g. a User-Agent. - -BaseHandler -- - -internals: -BaseHandler and parent -_call_chain conventions - -Example usage: - -import urllib.request - -# set up authentication info -authinfo = urllib.request.HTTPBasicAuthHandler() -authinfo.add_password(realm='PDQ Application', - uri='https://mahler:8092/site-updates.py', - user='klem', - passwd='geheim$parole') - -proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) - -# build a new opener that adds authentication and caching FTP handlers -opener = urllib.request.build_opener(proxy_support, authinfo, - urllib.request.CacheFTPHandler) - -# install it -urllib.request.install_opener(opener) - -f = urllib.request.urlopen('http://www.python.org/') -""" - -# XXX issues: -# If an authentication error handler that tries to perform -# authentication for some reason but fails, how should the error be -# signalled? The client needs to know the HTTP error code. But if -# the handler knows that the problem was, e.g., that it didn't know -# that hash algo that requested in the challenge, it would be good to -# pass that information along to the client, too. -# ftp errors aren't handled cleanly -# check digest against correct (i.e. non-apache) implementation - -# Possible extensions: -# complex proxies XXX not sure what exactly was meant by this -# abstract factory for opener - -from __future__ import absolute_import, division, print_function, unicode_literals -from future.builtins import bytes, dict, filter, input, int, map, open, str -from future.utils import PY2, PY3, raise_with_traceback - -import base64 -import bisect -import hashlib -import array - -from future.backports import email -from future.backports.http import client as http_client -from .error import URLError, HTTPError, ContentTooShortError -from .parse import ( - urlparse, urlsplit, urljoin, unwrap, quote, unquote, - splittype, splithost, splitport, splituser, splitpasswd, - splitattr, splitquery, splitvalue, splittag, to_bytes, urlunparse) -from .response import addinfourl, addclosehook - -import io -import os -import posixpath -import re -import socket -import sys -import time -import tempfile -import contextlib -import warnings - -from future.utils import PY2 - -if PY2: - from collections import Iterable -else: - from collections.abc import Iterable - -# check for SSL -try: - import ssl - # Not available in the SSL module in Py2: - from ssl import SSLContext -except ImportError: - _have_ssl = False -else: - _have_ssl = True - -__all__ = [ - # Classes - 'Request', 'OpenerDirector', 'BaseHandler', 'HTTPDefaultErrorHandler', - 'HTTPRedirectHandler', 'HTTPCookieProcessor', 'ProxyHandler', - 'HTTPPasswordMgr', 'HTTPPasswordMgrWithDefaultRealm', - 'AbstractBasicAuthHandler', 'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler', - 'AbstractDigestAuthHandler', 'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler', - 'HTTPHandler', 'FileHandler', 'FTPHandler', 'CacheFTPHandler', - 'UnknownHandler', 'HTTPErrorProcessor', - # Functions - 'urlopen', 'install_opener', 'build_opener', - 'pathname2url', 'url2pathname', 'getproxies', - # Legacy interface - 'urlretrieve', 'urlcleanup', 'URLopener', 'FancyURLopener', -] - -# used in User-Agent header sent -__version__ = 
sys.version[:3] - -_opener = None -def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **_3to2kwargs): - if 'cadefault' in _3to2kwargs: cadefault = _3to2kwargs['cadefault']; del _3to2kwargs['cadefault'] - else: cadefault = False - if 'capath' in _3to2kwargs: capath = _3to2kwargs['capath']; del _3to2kwargs['capath'] - else: capath = None - if 'cafile' in _3to2kwargs: cafile = _3to2kwargs['cafile']; del _3to2kwargs['cafile'] - else: cafile = None - global _opener - if cafile or capath or cadefault: - if not _have_ssl: - raise ValueError('SSL support not available') - context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) - context.options |= ssl.OP_NO_SSLv2 - context.verify_mode = ssl.CERT_REQUIRED - if cafile or capath: - context.load_verify_locations(cafile, capath) - else: - context.set_default_verify_paths() - https_handler = HTTPSHandler(context=context, check_hostname=True) - opener = build_opener(https_handler) - elif _opener is None: - _opener = opener = build_opener() - else: - opener = _opener - return opener.open(url, data, timeout) - -def install_opener(opener): - global _opener - _opener = opener - -_url_tempfiles = [] -def urlretrieve(url, filename=None, reporthook=None, data=None): - """ - Retrieve a URL into a temporary location on disk. - - Requires a URL argument. If a filename is passed, it is used as - the temporary file location. The reporthook argument should be - a callable that accepts a block number, a read size, and the - total file size of the URL target. The data argument should be - valid URL encoded data. - - If a filename is passed and the URL points to a local resource, - the result is a copy from local file to new file. - - Returns a tuple containing the path to the newly created - data file as well as the resulting HTTPMessage object. - """ - url_type, path = splittype(url) - - with contextlib.closing(urlopen(url, data)) as fp: - headers = fp.info() - - # Just return the local path and the "headers" for file:// - # URLs. No sense in performing a copy unless requested. - if url_type == "file" and not filename: - return os.path.normpath(path), headers - - # Handle temporary file setup. - if filename: - tfp = open(filename, 'wb') - else: - tfp = tempfile.NamedTemporaryFile(delete=False) - filename = tfp.name - _url_tempfiles.append(filename) - - with tfp: - result = filename, headers - bs = 1024*8 - size = -1 - read = 0 - blocknum = 0 - if "content-length" in headers: - size = int(headers["Content-Length"]) - - if reporthook: - reporthook(blocknum, bs, size) - - while True: - block = fp.read(bs) - if not block: - break - read += len(block) - tfp.write(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, bs, size) - - if size >= 0 and read < size: - raise ContentTooShortError( - "retrieval incomplete: got only %i out of %i bytes" - % (read, size), result) - - return result - -def urlcleanup(): - for temp_file in _url_tempfiles: - try: - os.unlink(temp_file) - except EnvironmentError: - pass - - del _url_tempfiles[:] - global _opener - if _opener: - _opener = None - -if PY3: - _cut_port_re = re.compile(r":\d+$", re.ASCII) -else: - _cut_port_re = re.compile(r":\d+$") - -def request_host(request): - - """Return request-host, as defined by RFC 2965. - - Variation from RFC: returned value is lowercased, for convenient - comparison. 
- - """ - url = request.full_url - host = urlparse(url)[1] - if host == "": - host = request.get_header("Host", "") - - # remove port, if present - host = _cut_port_re.sub("", host, 1) - return host.lower() - -class Request(object): - - def __init__(self, url, data=None, headers={}, - origin_req_host=None, unverifiable=False, - method=None): - # unwrap('') --> 'type://host/path' - self.full_url = unwrap(url) - self.full_url, self.fragment = splittag(self.full_url) - self.data = data - self.headers = {} - self._tunnel_host = None - for key, value in headers.items(): - self.add_header(key, value) - self.unredirected_hdrs = {} - if origin_req_host is None: - origin_req_host = request_host(self) - self.origin_req_host = origin_req_host - self.unverifiable = unverifiable - self.method = method - self._parse() - - def _parse(self): - self.type, rest = splittype(self.full_url) - if self.type is None: - raise ValueError("unknown url type: %r" % self.full_url) - self.host, self.selector = splithost(rest) - if self.host: - self.host = unquote(self.host) - - def get_method(self): - """Return a string indicating the HTTP request method.""" - if self.method is not None: - return self.method - elif self.data is not None: - return "POST" - else: - return "GET" - - def get_full_url(self): - if self.fragment: - return '%s#%s' % (self.full_url, self.fragment) - else: - return self.full_url - - # Begin deprecated methods - - def add_data(self, data): - msg = "Request.add_data method is deprecated." - warnings.warn(msg, DeprecationWarning, stacklevel=1) - self.data = data - - def has_data(self): - msg = "Request.has_data method is deprecated." - warnings.warn(msg, DeprecationWarning, stacklevel=1) - return self.data is not None - - def get_data(self): - msg = "Request.get_data method is deprecated." - warnings.warn(msg, DeprecationWarning, stacklevel=1) - return self.data - - def get_type(self): - msg = "Request.get_type method is deprecated." - warnings.warn(msg, DeprecationWarning, stacklevel=1) - return self.type - - def get_host(self): - msg = "Request.get_host method is deprecated." - warnings.warn(msg, DeprecationWarning, stacklevel=1) - return self.host - - def get_selector(self): - msg = "Request.get_selector method is deprecated." - warnings.warn(msg, DeprecationWarning, stacklevel=1) - return self.selector - - def is_unverifiable(self): - msg = "Request.is_unverifiable method is deprecated." - warnings.warn(msg, DeprecationWarning, stacklevel=1) - return self.unverifiable - - def get_origin_req_host(self): - msg = "Request.get_origin_req_host method is deprecated." 
- warnings.warn(msg, DeprecationWarning, stacklevel=1) - return self.origin_req_host - - # End deprecated methods - - def set_proxy(self, host, type): - if self.type == 'https' and not self._tunnel_host: - self._tunnel_host = self.host - else: - self.type= type - self.selector = self.full_url - self.host = host - - def has_proxy(self): - return self.selector == self.full_url - - def add_header(self, key, val): - # useful for something like authentication - self.headers[key.capitalize()] = val - - def add_unredirected_header(self, key, val): - # will not be added to a redirected request - self.unredirected_hdrs[key.capitalize()] = val - - def has_header(self, header_name): - return (header_name in self.headers or - header_name in self.unredirected_hdrs) - - def get_header(self, header_name, default=None): - return self.headers.get( - header_name, - self.unredirected_hdrs.get(header_name, default)) - - def header_items(self): - hdrs = self.unredirected_hdrs.copy() - hdrs.update(self.headers) - return list(hdrs.items()) - -class OpenerDirector(object): - def __init__(self): - client_version = "Python-urllib/%s" % __version__ - self.addheaders = [('User-agent', client_version)] - # self.handlers is retained only for backward compatibility - self.handlers = [] - # manage the individual handlers - self.handle_open = {} - self.handle_error = {} - self.process_response = {} - self.process_request = {} - - def add_handler(self, handler): - if not hasattr(handler, "add_parent"): - raise TypeError("expected BaseHandler instance, got %r" % - type(handler)) - - added = False - for meth in dir(handler): - if meth in ["redirect_request", "do_open", "proxy_open"]: - # oops, coincidental match - continue - - i = meth.find("_") - protocol = meth[:i] - condition = meth[i+1:] - - if condition.startswith("error"): - j = condition.find("_") + i + 1 - kind = meth[j+1:] - try: - kind = int(kind) - except ValueError: - pass - lookup = self.handle_error.get(protocol, {}) - self.handle_error[protocol] = lookup - elif condition == "open": - kind = protocol - lookup = self.handle_open - elif condition == "response": - kind = protocol - lookup = self.process_response - elif condition == "request": - kind = protocol - lookup = self.process_request - else: - continue - - handlers = lookup.setdefault(kind, []) - if handlers: - bisect.insort(handlers, handler) - else: - handlers.append(handler) - added = True - - if added: - bisect.insort(self.handlers, handler) - handler.add_parent(self) - - def close(self): - # Only exists for backwards compatibility. - pass - - def _call_chain(self, chain, kind, meth_name, *args): - # Handlers raise an exception if no one else should try to handle - # the request, or return None if they can't but another handler - # could. Otherwise, they return the response. - handlers = chain.get(kind, ()) - for handler in handlers: - func = getattr(handler, meth_name) - result = func(*args) - if result is not None: - return result - - def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - """ - Accept a URL or a Request object - - Python-Future: if the URL is passed as a byte-string, decode it first. 
- """ - if isinstance(fullurl, bytes): - fullurl = fullurl.decode() - if isinstance(fullurl, str): - req = Request(fullurl, data) - else: - req = fullurl - if data is not None: - req.data = data - - req.timeout = timeout - protocol = req.type - - # pre-process request - meth_name = protocol+"_request" - for processor in self.process_request.get(protocol, []): - meth = getattr(processor, meth_name) - req = meth(req) - - response = self._open(req, data) - - # post-process response - meth_name = protocol+"_response" - for processor in self.process_response.get(protocol, []): - meth = getattr(processor, meth_name) - response = meth(req, response) - - return response - - def _open(self, req, data=None): - result = self._call_chain(self.handle_open, 'default', - 'default_open', req) - if result: - return result - - protocol = req.type - result = self._call_chain(self.handle_open, protocol, protocol + - '_open', req) - if result: - return result - - return self._call_chain(self.handle_open, 'unknown', - 'unknown_open', req) - - def error(self, proto, *args): - if proto in ('http', 'https'): - # XXX http[s] protocols are special-cased - dict = self.handle_error['http'] # https is not different than http - proto = args[2] # YUCK! - meth_name = 'http_error_%s' % proto - http_err = 1 - orig_args = args - else: - dict = self.handle_error - meth_name = proto + '_error' - http_err = 0 - args = (dict, proto, meth_name) + args - result = self._call_chain(*args) - if result: - return result - - if http_err: - args = (dict, 'default', 'http_error_default') + orig_args - return self._call_chain(*args) - -# XXX probably also want an abstract factory that knows when it makes -# sense to skip a superclass in favor of a subclass and when it might -# make sense to include both - -def build_opener(*handlers): - """Create an opener object from a list of handlers. - - The opener will use several default handlers, including support - for HTTP, FTP and when applicable HTTPS. - - If any of the handlers passed as arguments are subclasses of the - default handlers, the default handlers will not be used. - """ - def isclass(obj): - return isinstance(obj, type) or hasattr(obj, "__bases__") - - opener = OpenerDirector() - default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, - HTTPDefaultErrorHandler, HTTPRedirectHandler, - FTPHandler, FileHandler, HTTPErrorProcessor] - if hasattr(http_client, "HTTPSConnection"): - default_classes.append(HTTPSHandler) - skip = set() - for klass in default_classes: - for check in handlers: - if isclass(check): - if issubclass(check, klass): - skip.add(klass) - elif isinstance(check, klass): - skip.add(klass) - for klass in skip: - default_classes.remove(klass) - - for klass in default_classes: - opener.add_handler(klass()) - - for h in handlers: - if isclass(h): - h = h() - opener.add_handler(h) - return opener - -class BaseHandler(object): - handler_order = 500 - - def add_parent(self, parent): - self.parent = parent - - def close(self): - # Only exists for backwards compatibility - pass - - def __lt__(self, other): - if not hasattr(other, "handler_order"): - # Try to preserve the old behavior of having custom classes - # inserted after default ones (works only for custom user - # classes which are not aware of handler_order). 
- return True - return self.handler_order < other.handler_order - - -class HTTPErrorProcessor(BaseHandler): - """Process HTTP error responses.""" - handler_order = 1000 # after all other processing - - def http_response(self, request, response): - code, msg, hdrs = response.code, response.msg, response.info() - - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. - if not (200 <= code < 300): - response = self.parent.error( - 'http', request, response, code, msg, hdrs) - - return response - - https_response = http_response - -class HTTPDefaultErrorHandler(BaseHandler): - def http_error_default(self, req, fp, code, msg, hdrs): - raise HTTPError(req.full_url, code, msg, hdrs, fp) - -class HTTPRedirectHandler(BaseHandler): - # maximum number of redirections to any single URL - # this is needed because of the state that cookies introduce - max_repeats = 4 - # maximum total number of redirections (regardless of URL) before - # assuming we're in a loop - max_redirections = 10 - - def redirect_request(self, req, fp, code, msg, headers, newurl): - """Return a Request or None in response to a redirect. - - This is called by the http_error_30x methods when a - redirection response is received. If a redirection should - take place, return a new Request to allow http_error_30x to - perform the redirect. Otherwise, raise HTTPError if no-one - else should try to handle this url. Return None if you can't - but another Handler might. - """ - m = req.get_method() - if (not (code in (301, 302, 303, 307) and m in ("GET", "HEAD") - or code in (301, 302, 303) and m == "POST")): - raise HTTPError(req.full_url, code, msg, headers, fp) - - # Strictly (according to RFC 2616), 301 or 302 in response to - # a POST MUST NOT cause a redirection without confirmation - # from the user (of urllib.request, in this case). In practice, - # essentially all clients do redirect in this case, so we do - # the same. - # be conciliant with URIs containing a space - newurl = newurl.replace(' ', '%20') - CONTENT_HEADERS = ("content-length", "content-type") - newheaders = dict((k, v) for k, v in req.headers.items() - if k.lower() not in CONTENT_HEADERS) - return Request(newurl, - headers=newheaders, - origin_req_host=req.origin_req_host, - unverifiable=True) - - # Implementation note: To avoid the server sending us into an - # infinite loop, the request object needs to track what URLs we - # have already seen. Do this by adding a handler-specific - # attribute to the Request object. - def http_error_302(self, req, fp, code, msg, headers): - # Some servers (incorrectly) return multiple Location headers - # (so probably same goes for URI). Use first header. - if "location" in headers: - newurl = headers["location"] - elif "uri" in headers: - newurl = headers["uri"] - else: - return - - # fix a possible malformed URL - urlparts = urlparse(newurl) - - # For security reasons we don't allow redirection to anything other - # than http, https or ftp. 
- - if urlparts.scheme not in ('http', 'https', 'ftp', ''): - raise HTTPError( - newurl, code, - "%s - Redirection to url '%s' is not allowed" % (msg, newurl), - headers, fp) - - if not urlparts.path: - urlparts = list(urlparts) - urlparts[2] = "/" - newurl = urlunparse(urlparts) - - newurl = urljoin(req.full_url, newurl) - - # XXX Probably want to forget about the state of the current - # request, although that might interact poorly with other - # handlers that also use handler-specific request attributes - new = self.redirect_request(req, fp, code, msg, headers, newurl) - if new is None: - return - - # loop detection - # .redirect_dict has a key url if url was previously visited. - if hasattr(req, 'redirect_dict'): - visited = new.redirect_dict = req.redirect_dict - if (visited.get(newurl, 0) >= self.max_repeats or - len(visited) >= self.max_redirections): - raise HTTPError(req.full_url, code, - self.inf_msg + msg, headers, fp) - else: - visited = new.redirect_dict = req.redirect_dict = {} - visited[newurl] = visited.get(newurl, 0) + 1 - - # Don't close the fp until we are sure that we won't use it - # with HTTPError. - fp.read() - fp.close() - - return self.parent.open(new, timeout=req.timeout) - - http_error_301 = http_error_303 = http_error_307 = http_error_302 - - inf_msg = "The HTTP server returned a redirect error that would " \ - "lead to an infinite loop.\n" \ - "The last 30x error message was:\n" - - -def _parse_proxy(proxy): - """Return (scheme, user, password, host/port) given a URL or an authority. - - If a URL is supplied, it must have an authority (host:port) component. - According to RFC 3986, having an authority component means the URL must - have two slashes after the scheme: - - >>> _parse_proxy('file:/ftp.example.com/') - Traceback (most recent call last): - ValueError: proxy URL with no authority: 'file:/ftp.example.com/' - - The first three items of the returned tuple may be None. 
- - Examples of authority parsing: - - >>> _parse_proxy('proxy.example.com') - (None, None, None, 'proxy.example.com') - >>> _parse_proxy('proxy.example.com:3128') - (None, None, None, 'proxy.example.com:3128') - - The authority component may optionally include userinfo (assumed to be - username:password): - - >>> _parse_proxy('joe:password@proxy.example.com') - (None, 'joe', 'password', 'proxy.example.com') - >>> _parse_proxy('joe:password@proxy.example.com:3128') - (None, 'joe', 'password', 'proxy.example.com:3128') - - Same examples, but with URLs instead: - - >>> _parse_proxy('http://proxy.example.com/') - ('http', None, None, 'proxy.example.com') - >>> _parse_proxy('http://proxy.example.com:3128/') - ('http', None, None, 'proxy.example.com:3128') - >>> _parse_proxy('http://joe:password@proxy.example.com/') - ('http', 'joe', 'password', 'proxy.example.com') - >>> _parse_proxy('http://joe:password@proxy.example.com:3128') - ('http', 'joe', 'password', 'proxy.example.com:3128') - - Everything after the authority is ignored: - - >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128') - ('ftp', 'joe', 'password', 'proxy.example.com') - - Test for no trailing '/' case: - - >>> _parse_proxy('http://joe:password@proxy.example.com') - ('http', 'joe', 'password', 'proxy.example.com') - - """ - scheme, r_scheme = splittype(proxy) - if not r_scheme.startswith("/"): - # authority - scheme = None - authority = proxy - else: - # URL - if not r_scheme.startswith("//"): - raise ValueError("proxy URL with no authority: %r" % proxy) - # We have an authority, so for RFC 3986-compliant URLs (by ss 3. - # and 3.3.), path is empty or starts with '/' - end = r_scheme.find("/", 2) - if end == -1: - end = None - authority = r_scheme[2:end] - userinfo, hostport = splituser(authority) - if userinfo is not None: - user, password = splitpasswd(userinfo) - else: - user = password = None - return scheme, user, password, hostport - -class ProxyHandler(BaseHandler): - # Proxies must be in front - handler_order = 100 - - def __init__(self, proxies=None): - if proxies is None: - proxies = getproxies() - assert hasattr(proxies, 'keys'), "proxies must be a mapping" - self.proxies = proxies - for type, url in proxies.items(): - setattr(self, '%s_open' % type, - lambda r, proxy=url, type=type, meth=self.proxy_open: - meth(r, proxy, type)) - - def proxy_open(self, req, proxy, type): - orig_type = req.type - proxy_type, user, password, hostport = _parse_proxy(proxy) - if proxy_type is None: - proxy_type = orig_type - - if req.host and proxy_bypass(req.host): - return None - - if user and password: - user_pass = '%s:%s' % (unquote(user), - unquote(password)) - creds = base64.b64encode(user_pass.encode()).decode("ascii") - req.add_header('Proxy-authorization', 'Basic ' + creds) - hostport = unquote(hostport) - req.set_proxy(hostport, proxy_type) - if orig_type == proxy_type or orig_type == 'https': - # let other handlers take care of it - return None - else: - # need to start over, because the other handlers don't - # grok the proxy's URL type - # e.g. 
if we have a constructor arg proxies like so: - # {'http': 'ftp://proxy.example.com'}, we may end up turning - # a request for http://acme.example.com/a into one for - # ftp://proxy.example.com/a - return self.parent.open(req, timeout=req.timeout) - -class HTTPPasswordMgr(object): - - def __init__(self): - self.passwd = {} - - def add_password(self, realm, uri, user, passwd): - # uri could be a single URI or a sequence - if isinstance(uri, str): - uri = [uri] - if realm not in self.passwd: - self.passwd[realm] = {} - for default_port in True, False: - reduced_uri = tuple( - [self.reduce_uri(u, default_port) for u in uri]) - self.passwd[realm][reduced_uri] = (user, passwd) - - def find_user_password(self, realm, authuri): - domains = self.passwd.get(realm, {}) - for default_port in True, False: - reduced_authuri = self.reduce_uri(authuri, default_port) - for uris, authinfo in domains.items(): - for uri in uris: - if self.is_suburi(uri, reduced_authuri): - return authinfo - return None, None - - def reduce_uri(self, uri, default_port=True): - """Accept authority or URI and extract only the authority and path.""" - # note HTTP URLs do not have a userinfo component - parts = urlsplit(uri) - if parts[1]: - # URI - scheme = parts[0] - authority = parts[1] - path = parts[2] or '/' - else: - # host or host:port - scheme = None - authority = uri - path = '/' - host, port = splitport(authority) - if default_port and port is None and scheme is not None: - dport = {"http": 80, - "https": 443, - }.get(scheme) - if dport is not None: - authority = "%s:%d" % (host, dport) - return authority, path - - def is_suburi(self, base, test): - """Check if test is below base in a URI tree - - Both args must be URIs in reduced form. - """ - if base == test: - return True - if base[0] != test[0]: - return False - common = posixpath.commonprefix((base[1], test[1])) - if len(common) == len(base[1]): - return True - return False - - -class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): - - def find_user_password(self, realm, authuri): - user, password = HTTPPasswordMgr.find_user_password(self, realm, - authuri) - if user is not None: - return user, password - return HTTPPasswordMgr.find_user_password(self, None, authuri) - - -class AbstractBasicAuthHandler(object): - - # XXX this allows for multiple auth-schemes, but will stupidly pick - # the last one with a realm specified. - - # allow for double- and single-quoted realm values - # (single quotes are a violation of the RFC, but appear in the wild) - rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+' - 'realm=(["\']?)([^"\']*)\\2', re.I) - - # XXX could pre-emptively send auth info already accepted (RFC 2617, - # end of section 2, and section 1.2 immediately after "credentials" - # production). - - def __init__(self, password_mgr=None): - if password_mgr is None: - password_mgr = HTTPPasswordMgr() - self.passwd = password_mgr - self.add_password = self.passwd.add_password - self.retried = 0 - - def reset_retry_count(self): - self.retried = 0 - - def http_error_auth_reqed(self, authreq, host, req, headers): - # host may be an authority (without userinfo) or a URL with an - # authority - # XXX could be multiple headers - authreq = headers.get(authreq, None) - - if self.retried > 5: - # retry sending the username:password 5 times before failing. 
- raise HTTPError(req.get_full_url(), 401, "basic auth failed", - headers, None) - else: - self.retried += 1 - - if authreq: - scheme = authreq.split()[0] - if scheme.lower() != 'basic': - raise ValueError("AbstractBasicAuthHandler does not" - " support the following scheme: '%s'" % - scheme) - else: - mo = AbstractBasicAuthHandler.rx.search(authreq) - if mo: - scheme, quote, realm = mo.groups() - if quote not in ['"',"'"]: - warnings.warn("Basic Auth Realm was unquoted", - UserWarning, 2) - if scheme.lower() == 'basic': - response = self.retry_http_basic_auth(host, req, realm) - if response and response.code != 401: - self.retried = 0 - return response - - def retry_http_basic_auth(self, host, req, realm): - user, pw = self.passwd.find_user_password(realm, host) - if pw is not None: - raw = "%s:%s" % (user, pw) - auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii") - if req.headers.get(self.auth_header, None) == auth: - return None - req.add_unredirected_header(self.auth_header, auth) - return self.parent.open(req, timeout=req.timeout) - else: - return None - - -class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): - - auth_header = 'Authorization' - - def http_error_401(self, req, fp, code, msg, headers): - url = req.full_url - response = self.http_error_auth_reqed('www-authenticate', - url, req, headers) - self.reset_retry_count() - return response - - -class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): - - auth_header = 'Proxy-authorization' - - def http_error_407(self, req, fp, code, msg, headers): - # http_error_auth_reqed requires that there is no userinfo component in - # authority. Assume there isn't one, since urllib.request does not (and - # should not, RFC 3986 s. 3.2.1) support requests for URLs containing - # userinfo. - authority = req.host - response = self.http_error_auth_reqed('proxy-authenticate', - authority, req, headers) - self.reset_retry_count() - return response - - -# Return n random bytes. -_randombytes = os.urandom - - -class AbstractDigestAuthHandler(object): - # Digest authentication is specified in RFC 2617. - - # XXX The client does not inspect the Authentication-Info header - # in a successful response. - - # XXX It should be possible to test this implementation against - # a mock server that just generates a static set of challenges. - - # XXX qop="auth-int" supports is shaky - - def __init__(self, passwd=None): - if passwd is None: - passwd = HTTPPasswordMgr() - self.passwd = passwd - self.add_password = self.passwd.add_password - self.retried = 0 - self.nonce_count = 0 - self.last_nonce = None - - def reset_retry_count(self): - self.retried = 0 - - def http_error_auth_reqed(self, auth_header, host, req, headers): - authreq = headers.get(auth_header, None) - if self.retried > 5: - # Don't fail endlessly - if we failed once, we'll probably - # fail a second time. Hm. Unless the Password Manager is - # prompting for the information. Crap. 
This isn't great - # but it's better than the current 'repeat until recursion - # depth exceeded' approach - raise HTTPError(req.full_url, 401, "digest auth failed", - headers, None) - else: - self.retried += 1 - if authreq: - scheme = authreq.split()[0] - if scheme.lower() == 'digest': - return self.retry_http_digest_auth(req, authreq) - elif scheme.lower() != 'basic': - raise ValueError("AbstractDigestAuthHandler does not support" - " the following scheme: '%s'" % scheme) - - def retry_http_digest_auth(self, req, auth): - token, challenge = auth.split(' ', 1) - chal = parse_keqv_list(filter(None, parse_http_list(challenge))) - auth = self.get_authorization(req, chal) - if auth: - auth_val = 'Digest %s' % auth - if req.headers.get(self.auth_header, None) == auth_val: - return None - req.add_unredirected_header(self.auth_header, auth_val) - resp = self.parent.open(req, timeout=req.timeout) - return resp - - def get_cnonce(self, nonce): - # The cnonce-value is an opaque - # quoted string value provided by the client and used by both client - # and server to avoid chosen plaintext attacks, to provide mutual - # authentication, and to provide some message integrity protection. - # This isn't a fabulous effort, but it's probably Good Enough. - s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime()) - b = s.encode("ascii") + _randombytes(8) - dig = hashlib.sha1(b).hexdigest() - return dig[:16] - - def get_authorization(self, req, chal): - try: - realm = chal['realm'] - nonce = chal['nonce'] - qop = chal.get('qop') - algorithm = chal.get('algorithm', 'MD5') - # mod_digest doesn't send an opaque, even though it isn't - # supposed to be optional - opaque = chal.get('opaque', None) - except KeyError: - return None - - H, KD = self.get_algorithm_impls(algorithm) - if H is None: - return None - - user, pw = self.passwd.find_user_password(realm, req.full_url) - if user is None: - return None - - # XXX not implemented yet - if req.data is not None: - entdig = self.get_entity_digest(req.data, chal) - else: - entdig = None - - A1 = "%s:%s:%s" % (user, realm, pw) - A2 = "%s:%s" % (req.get_method(), - # XXX selector: what about proxies and full urls - req.selector) - if qop == 'auth': - if nonce == self.last_nonce: - self.nonce_count += 1 - else: - self.nonce_count = 1 - self.last_nonce = nonce - ncvalue = '%08x' % self.nonce_count - cnonce = self.get_cnonce(nonce) - noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2)) - respdig = KD(H(A1), noncebit) - elif qop is None: - respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) - else: - # XXX handle auth-int. - raise URLError("qop '%s' is not supported." % qop) - - # XXX should the partial digests be encoded too? 
- - base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (user, realm, nonce, req.selector, - respdig) - if opaque: - base += ', opaque="%s"' % opaque - if entdig: - base += ', digest="%s"' % entdig - base += ', algorithm="%s"' % algorithm - if qop: - base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) - return base - - def get_algorithm_impls(self, algorithm): - # lambdas assume digest modules are imported at the top level - if algorithm == 'MD5': - H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest() - elif algorithm == 'SHA': - H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest() - # XXX MD5-sess - KD = lambda s, d: H("%s:%s" % (s, d)) - return H, KD - - def get_entity_digest(self, data, chal): - # XXX not implemented yet - return None - - -class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): - """An authentication protocol defined by RFC 2069 - - Digest authentication improves on basic authentication because it - does not transmit passwords in the clear. - """ - - auth_header = 'Authorization' - handler_order = 490 # before Basic auth - - def http_error_401(self, req, fp, code, msg, headers): - host = urlparse(req.full_url)[1] - retry = self.http_error_auth_reqed('www-authenticate', - host, req, headers) - self.reset_retry_count() - return retry - - -class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): - - auth_header = 'Proxy-Authorization' - handler_order = 490 # before Basic auth - - def http_error_407(self, req, fp, code, msg, headers): - host = req.host - retry = self.http_error_auth_reqed('proxy-authenticate', - host, req, headers) - self.reset_retry_count() - return retry - -class AbstractHTTPHandler(BaseHandler): - - def __init__(self, debuglevel=0): - self._debuglevel = debuglevel - - def set_http_debuglevel(self, level): - self._debuglevel = level - - def do_request_(self, request): - host = request.host - if not host: - raise URLError('no host given') - - if request.data is not None: # POST - data = request.data - if isinstance(data, str): - msg = "POST data should be bytes or an iterable of bytes. " \ - "It cannot be of type str." - raise TypeError(msg) - if not request.has_header('Content-type'): - request.add_unredirected_header( - 'Content-type', - 'application/x-www-form-urlencoded') - if not request.has_header('Content-length'): - size = None - try: - ### For Python-Future: - if PY2 and isinstance(data, array.array): - # memoryviews of arrays aren't supported - # in Py2.7. (e.g. memoryview(array.array('I', - # [1, 2, 3, 4])) raises a TypeError.) - # So we calculate the size manually instead: - size = len(data) * data.itemsize - ### - else: - mv = memoryview(data) - size = len(mv) * mv.itemsize - except TypeError: - if isinstance(data, Iterable): - raise ValueError("Content-Length should be specified " - "for iterable data of type %r %r" % (type(data), - data)) - else: - request.add_unredirected_header( - 'Content-length', '%d' % size) - - sel_host = host - if request.has_proxy(): - scheme, sel = splittype(request.selector) - sel_host, sel_path = splithost(sel) - if not request.has_header('Host'): - request.add_unredirected_header('Host', sel_host) - for name, value in self.parent.addheaders: - name = name.capitalize() - if not request.has_header(name): - request.add_unredirected_header(name, value) - - return request - - def do_open(self, http_class, req, **http_conn_args): - """Return an HTTPResponse object for the request, using http_class. 
- - http_class must implement the HTTPConnection API from http.client. - """ - host = req.host - if not host: - raise URLError('no host given') - - # will parse host:port - h = http_class(host, timeout=req.timeout, **http_conn_args) - - headers = dict(req.unredirected_hdrs) - headers.update(dict((k, v) for k, v in req.headers.items() - if k not in headers)) - - # TODO(jhylton): Should this be redesigned to handle - # persistent connections? - - # We want to make an HTTP/1.1 request, but the addinfourl - # class isn't prepared to deal with a persistent connection. - # It will try to read all remaining data from the socket, - # which will block while the server waits for the next request. - # So make sure the connection gets closed after the (only) - # request. - headers["Connection"] = "close" - headers = dict((name.title(), val) for name, val in headers.items()) - - if req._tunnel_host: - tunnel_headers = {} - proxy_auth_hdr = "Proxy-Authorization" - if proxy_auth_hdr in headers: - tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] - # Proxy-Authorization should not be sent to origin - # server. - del headers[proxy_auth_hdr] - h.set_tunnel(req._tunnel_host, headers=tunnel_headers) - - try: - h.request(req.get_method(), req.selector, req.data, headers) - except socket.error as err: # timeout error - h.close() - raise URLError(err) - else: - r = h.getresponse() - # If the server does not send us a 'Connection: close' header, - # HTTPConnection assumes the socket should be left open. Manually - # mark the socket to be closed when this response object goes away. - if h.sock: - h.sock.close() - h.sock = None - - - r.url = req.get_full_url() - # This line replaces the .msg attribute of the HTTPResponse - # with .headers, because urllib clients expect the response to - # have the reason in .msg. It would be good to mark this - # attribute is deprecated and get then to use info() or - # .headers. 
- r.msg = r.reason - return r - - -class HTTPHandler(AbstractHTTPHandler): - - def http_open(self, req): - return self.do_open(http_client.HTTPConnection, req) - - http_request = AbstractHTTPHandler.do_request_ - -if hasattr(http_client, 'HTTPSConnection'): - - class HTTPSHandler(AbstractHTTPHandler): - - def __init__(self, debuglevel=0, context=None, check_hostname=None): - AbstractHTTPHandler.__init__(self, debuglevel) - self._context = context - self._check_hostname = check_hostname - - def https_open(self, req): - return self.do_open(http_client.HTTPSConnection, req, - context=self._context, check_hostname=self._check_hostname) - - https_request = AbstractHTTPHandler.do_request_ - - __all__.append('HTTPSHandler') - -class HTTPCookieProcessor(BaseHandler): - def __init__(self, cookiejar=None): - import future.backports.http.cookiejar as http_cookiejar - if cookiejar is None: - cookiejar = http_cookiejar.CookieJar() - self.cookiejar = cookiejar - - def http_request(self, request): - self.cookiejar.add_cookie_header(request) - return request - - def http_response(self, request, response): - self.cookiejar.extract_cookies(response, request) - return response - - https_request = http_request - https_response = http_response - -class UnknownHandler(BaseHandler): - def unknown_open(self, req): - type = req.type - raise URLError('unknown url type: %s' % type) - -def parse_keqv_list(l): - """Parse list of key=value strings where keys are not duplicated.""" - parsed = {} - for elt in l: - k, v = elt.split('=', 1) - if v[0] == '"' and v[-1] == '"': - v = v[1:-1] - parsed[k] = v - return parsed - -def parse_http_list(s): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Neither commas nor quotes count if they are escaped. - Only double-quotes count, not single-quotes. 
- """ - res = [] - part = '' - - escape = quote = False - for cur in s: - if escape: - part += cur - escape = False - continue - if quote: - if cur == '\\': - escape = True - continue - elif cur == '"': - quote = False - part += cur - continue - - if cur == ',': - res.append(part) - part = '' - continue - - if cur == '"': - quote = True - - part += cur - - # append last part - if part: - res.append(part) - - return [part.strip() for part in res] - -class FileHandler(BaseHandler): - # Use local file or FTP depending on form of URL - def file_open(self, req): - url = req.selector - if url[:2] == '//' and url[2:3] != '/' and (req.host and - req.host != 'localhost'): - if not req.host is self.get_names(): - raise URLError("file:// scheme is supported only on localhost") - else: - return self.open_local_file(req) - - # names for the localhost - names = None - def get_names(self): - if FileHandler.names is None: - try: - FileHandler.names = tuple( - socket.gethostbyname_ex('localhost')[2] + - socket.gethostbyname_ex(socket.gethostname())[2]) - except socket.gaierror: - FileHandler.names = (socket.gethostbyname('localhost'),) - return FileHandler.names - - # not entirely sure what the rules are here - def open_local_file(self, req): - import future.backports.email.utils as email_utils - import mimetypes - host = req.host - filename = req.selector - localfile = url2pathname(filename) - try: - stats = os.stat(localfile) - size = stats.st_size - modified = email_utils.formatdate(stats.st_mtime, usegmt=True) - mtype = mimetypes.guess_type(filename)[0] - headers = email.message_from_string( - 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % - (mtype or 'text/plain', size, modified)) - if host: - host, port = splitport(host) - if not host or \ - (not port and _safe_gethostbyname(host) in self.get_names()): - if host: - origurl = 'file://' + host + filename - else: - origurl = 'file://' + filename - return addinfourl(open(localfile, 'rb'), headers, origurl) - except OSError as exp: - # users shouldn't expect OSErrors coming from urlopen() - raise URLError(exp) - raise URLError('file not on local host') - -def _safe_gethostbyname(host): - try: - return socket.gethostbyname(host) - except socket.gaierror: - return None - -class FTPHandler(BaseHandler): - def ftp_open(self, req): - import ftplib - import mimetypes - host = req.host - if not host: - raise URLError('ftp error: no host given') - host, port = splitport(host) - if port is None: - port = ftplib.FTP_PORT - else: - port = int(port) - - # username/password handling - user, host = splituser(host) - if user: - user, passwd = splitpasswd(user) - else: - passwd = None - host = unquote(host) - user = user or '' - passwd = passwd or '' - - try: - host = socket.gethostbyname(host) - except socket.error as msg: - raise URLError(msg) - path, attrs = splitattr(req.selector) - dirs = path.split('/') - dirs = list(map(unquote, dirs)) - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: - dirs = dirs[1:] - try: - fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) - type = file and 'I' or 'D' - for attr in attrs: - attr, value = splitvalue(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - fp, retrlen = fw.retrfile(file, type) - headers = "" - mtype = mimetypes.guess_type(req.full_url)[0] - if mtype: - headers += "Content-type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-length: %d\n" % retrlen - headers = 
email.message_from_string(headers) - return addinfourl(fp, headers, req.full_url) - except ftplib.all_errors as exp: - exc = URLError('ftp error: %r' % exp) - raise_with_traceback(exc) - - def connect_ftp(self, user, passwd, host, port, dirs, timeout): - return ftpwrapper(user, passwd, host, port, dirs, timeout, - persistent=False) - -class CacheFTPHandler(FTPHandler): - # XXX would be nice to have pluggable cache strategies - # XXX this stuff is definitely not thread safe - def __init__(self): - self.cache = {} - self.timeout = {} - self.soonest = 0 - self.delay = 60 - self.max_conns = 16 - - def setTimeout(self, t): - self.delay = t - - def setMaxConns(self, m): - self.max_conns = m - - def connect_ftp(self, user, passwd, host, port, dirs, timeout): - key = user, host, port, '/'.join(dirs), timeout - if key in self.cache: - self.timeout[key] = time.time() + self.delay - else: - self.cache[key] = ftpwrapper(user, passwd, host, port, - dirs, timeout) - self.timeout[key] = time.time() + self.delay - self.check_cache() - return self.cache[key] - - def check_cache(self): - # first check for old ones - t = time.time() - if self.soonest <= t: - for k, v in list(self.timeout.items()): - if v < t: - self.cache[k].close() - del self.cache[k] - del self.timeout[k] - self.soonest = min(list(self.timeout.values())) - - # then check the size - if len(self.cache) == self.max_conns: - for k, v in list(self.timeout.items()): - if v == self.soonest: - del self.cache[k] - del self.timeout[k] - break - self.soonest = min(list(self.timeout.values())) - - def clear_cache(self): - for conn in self.cache.values(): - conn.close() - self.cache.clear() - self.timeout.clear() - - -# Code move from the old urllib module - -MAXFTPCACHE = 10 # Trim the ftp cache beyond this size - -# Helper for non-unix systems -if os.name == 'nt': - from nturl2path import url2pathname, pathname2url -else: - def url2pathname(pathname): - """OS-specific conversion from a relative URL of the 'file' scheme - to a file system path; not recommended for general use.""" - return unquote(pathname) - - def pathname2url(pathname): - """OS-specific conversion from a file system path to a relative URL - of the 'file' scheme; not recommended for general use.""" - return quote(pathname) - -# This really consists of two pieces: -# (1) a class which handles opening of all sorts of URLs -# (plus assorted utilities etc.) -# (2) a set of functions for parsing URLs -# XXX Should these be separated out into different modules? - - -ftpcache = {} -class URLopener(object): - """Class to open URLs. - This is a class rather than just a subroutine because we may need - more than one set of global protocol-specific options. - Note -- this is a base class for those who don't want the - automatic handling of errors type 302 (relocated) and 401 - (authorization needed).""" - - __tempfiles = None - - version = "Python-urllib/%s" % __version__ - - # Constructor - def __init__(self, proxies=None, **x509): - msg = "%(class)s style of invoking requests is deprecated. 
" \ - "Use newer urlopen functions/methods" % {'class': self.__class__.__name__} - warnings.warn(msg, DeprecationWarning, stacklevel=3) - if proxies is None: - proxies = getproxies() - assert hasattr(proxies, 'keys'), "proxies must be a mapping" - self.proxies = proxies - self.key_file = x509.get('key_file') - self.cert_file = x509.get('cert_file') - self.addheaders = [('User-Agent', self.version)] - self.__tempfiles = [] - self.__unlink = os.unlink # See cleanup() - self.tempcache = None - # Undocumented feature: if you assign {} to tempcache, - # it is used to cache files retrieved with - # self.retrieve(). This is not enabled by default - # since it does not work for changing documents (and I - # haven't got the logic to check expiration headers - # yet). - self.ftpcache = ftpcache - # Undocumented feature: you can use a different - # ftp cache by assigning to the .ftpcache member; - # in case you want logically independent URL openers - # XXX This is not threadsafe. Bah. - - def __del__(self): - self.close() - - def close(self): - self.cleanup() - - def cleanup(self): - # This code sometimes runs when the rest of this module - # has already been deleted, so it can't use any globals - # or import anything. - if self.__tempfiles: - for file in self.__tempfiles: - try: - self.__unlink(file) - except OSError: - pass - del self.__tempfiles[:] - if self.tempcache: - self.tempcache.clear() - - def addheader(self, *args): - """Add a header to be used by the HTTP interface only - e.g. u.addheader('Accept', 'sound/basic')""" - self.addheaders.append(args) - - # External interface - def open(self, fullurl, data=None): - """Use URLopener().open(file) instead of open(file, 'r').""" - fullurl = unwrap(to_bytes(fullurl)) - fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|") - if self.tempcache and fullurl in self.tempcache: - filename, headers = self.tempcache[fullurl] - fp = open(filename, 'rb') - return addinfourl(fp, headers, fullurl) - urltype, url = splittype(fullurl) - if not urltype: - urltype = 'file' - if urltype in self.proxies: - proxy = self.proxies[urltype] - urltype, proxyhost = splittype(proxy) - host, selector = splithost(proxyhost) - url = (host, fullurl) # Signal special case to open_*() - else: - proxy = None - name = 'open_' + urltype - self.type = urltype - name = name.replace('-', '_') - if not hasattr(self, name): - if proxy: - return self.open_unknown_proxy(proxy, fullurl, data) - else: - return self.open_unknown(fullurl, data) - try: - if data is None: - return getattr(self, name)(url) - else: - return getattr(self, name)(url, data) - except HTTPError: - raise - except socket.error as msg: - raise_with_traceback(IOError('socket error', msg)) - - def open_unknown(self, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = splittype(fullurl) - raise IOError('url error', 'unknown url type', type) - - def open_unknown_proxy(self, proxy, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = splittype(fullurl) - raise IOError('url error', 'invalid proxy for %s' % type, proxy) - - # External interface - def retrieve(self, url, filename=None, reporthook=None, data=None): - """retrieve(url) returns (filename, headers) for a local object - or (tempfilename, headers) for a remote object.""" - url = unwrap(to_bytes(url)) - if self.tempcache and url in self.tempcache: - return self.tempcache[url] - type, url1 = splittype(url) - if filename is None and (not type or type == 'file'): - try: - fp = 
self.open_local_file(url1) - hdrs = fp.info() - fp.close() - return url2pathname(splithost(url1)[1]), hdrs - except IOError as msg: - pass - fp = self.open(url, data) - try: - headers = fp.info() - if filename: - tfp = open(filename, 'wb') - else: - import tempfile - garbage, path = splittype(url) - garbage, path = splithost(path or "") - path, garbage = splitquery(path or "") - path, garbage = splitattr(path or "") - suffix = os.path.splitext(path)[1] - (fd, filename) = tempfile.mkstemp(suffix) - self.__tempfiles.append(filename) - tfp = os.fdopen(fd, 'wb') - try: - result = filename, headers - if self.tempcache is not None: - self.tempcache[url] = result - bs = 1024*8 - size = -1 - read = 0 - blocknum = 0 - if "content-length" in headers: - size = int(headers["Content-Length"]) - if reporthook: - reporthook(blocknum, bs, size) - while 1: - block = fp.read(bs) - if not block: - break - read += len(block) - tfp.write(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, bs, size) - finally: - tfp.close() - finally: - fp.close() - - # raise exception if actual size does not match content-length header - if size >= 0 and read < size: - raise ContentTooShortError( - "retrieval incomplete: got only %i out of %i bytes" - % (read, size), result) - - return result - - # Each method named open_ knows how to open that type of URL - - def _open_generic_http(self, connection_factory, url, data): - """Make an HTTP connection using connection_class. - - This is an internal method that should be called from - open_http() or open_https(). - - Arguments: - - connection_factory should take a host name and return an - HTTPConnection instance. - - url is the url to retrieval or a host, relative-path pair. - - data is payload for a POST request or None. - """ - - user_passwd = None - proxy_passwd= None - if isinstance(url, str): - host, selector = splithost(url) - if host: - user_passwd, host = splituser(host) - host = unquote(host) - realhost = host - else: - host, selector = url - # check whether the proxy contains authorization information - proxy_passwd, host = splituser(host) - # now we proceed with the url we want to obtain - urltype, rest = splittype(selector) - url = rest - user_passwd = None - if urltype.lower() != 'http': - realhost = None - else: - realhost, rest = splithost(rest) - if realhost: - user_passwd, realhost = splituser(realhost) - if user_passwd: - selector = "%s://%s%s" % (urltype, realhost, rest) - if proxy_bypass(realhost): - host = realhost - - if not host: raise IOError('http error', 'no host given') - - if proxy_passwd: - proxy_passwd = unquote(proxy_passwd) - proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') - else: - proxy_auth = None - - if user_passwd: - user_passwd = unquote(user_passwd) - auth = base64.b64encode(user_passwd.encode()).decode('ascii') - else: - auth = None - http_conn = connection_factory(host) - headers = {} - if proxy_auth: - headers["Proxy-Authorization"] = "Basic %s" % proxy_auth - if auth: - headers["Authorization"] = "Basic %s" % auth - if realhost: - headers["Host"] = realhost - - # Add Connection:close as we don't support persistent connections yet. 
- # This helps in closing the socket and avoiding ResourceWarning - - headers["Connection"] = "close" - - for header, value in self.addheaders: - headers[header] = value - - if data is not None: - headers["Content-Type"] = "application/x-www-form-urlencoded" - http_conn.request("POST", selector, data, headers) - else: - http_conn.request("GET", selector, headers=headers) - - try: - response = http_conn.getresponse() - except http_client.BadStatusLine: - # something went wrong with the HTTP status line - raise URLError("http protocol error: bad status line") - - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. - if 200 <= response.status < 300: - return addinfourl(response, response.msg, "http:" + url, - response.status) - else: - return self.http_error( - url, response.fp, - response.status, response.reason, response.msg, data) - - def open_http(self, url, data=None): - """Use HTTP protocol.""" - return self._open_generic_http(http_client.HTTPConnection, url, data) - - def http_error(self, url, fp, errcode, errmsg, headers, data=None): - """Handle http errors. - - Derived class can override this, or provide specific handlers - named http_error_DDD where DDD is the 3-digit error code.""" - # First check if there's a specific handler for this error - name = 'http_error_%d' % errcode - if hasattr(self, name): - method = getattr(self, name) - if data is None: - result = method(url, fp, errcode, errmsg, headers) - else: - result = method(url, fp, errcode, errmsg, headers, data) - if result: return result - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handler: close the connection and raise IOError.""" - fp.close() - raise HTTPError(url, errcode, errmsg, headers, None) - - if _have_ssl: - def _https_connection(self, host): - return http_client.HTTPSConnection(host, - key_file=self.key_file, - cert_file=self.cert_file) - - def open_https(self, url, data=None): - """Use HTTPS protocol.""" - return self._open_generic_http(self._https_connection, url, data) - - def open_file(self, url): - """Use local file or FTP depending on form of URL.""" - if not isinstance(url, str): - raise URLError('file error: proxy support for file protocol currently not implemented') - if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/': - raise ValueError("file:// scheme is supported only on localhost") - else: - return self.open_local_file(url) - - def open_local_file(self, url): - """Use local file.""" - import future.backports.email.utils as email_utils - import mimetypes - host, file = splithost(url) - localname = url2pathname(file) - try: - stats = os.stat(localname) - except OSError as e: - raise URLError(e.strerror, e.filename) - size = stats.st_size - modified = email_utils.formatdate(stats.st_mtime, usegmt=True) - mtype = mimetypes.guess_type(url)[0] - headers = email.message_from_string( - 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' % - (mtype or 'text/plain', size, modified)) - if not host: - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - return addinfourl(open(localname, 'rb'), headers, urlfile) - host, port = splitport(host) - if (not port - and socket.gethostbyname(host) in ((localhost(),) + thishost())): - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - elif file[:2] == './': - raise ValueError("local file url may start with / or file:. 
Unknown url of type: %s" % url) - return addinfourl(open(localname, 'rb'), headers, urlfile) - raise URLError('local file error: not on local host') - - def open_ftp(self, url): - """Use FTP protocol.""" - if not isinstance(url, str): - raise URLError('ftp error: proxy support for ftp protocol currently not implemented') - import mimetypes - host, path = splithost(url) - if not host: raise URLError('ftp error: no host given') - host, port = splitport(host) - user, host = splituser(host) - if user: user, passwd = splitpasswd(user) - else: passwd = None - host = unquote(host) - user = unquote(user or '') - passwd = unquote(passwd or '') - host = socket.gethostbyname(host) - if not port: - import ftplib - port = ftplib.FTP_PORT - else: - port = int(port) - path, attrs = splitattr(path) - path = unquote(path) - dirs = path.split('/') - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: dirs = dirs[1:] - if dirs and not dirs[0]: dirs[0] = '/' - key = user, host, port, '/'.join(dirs) - # XXX thread unsafe! - if len(self.ftpcache) > MAXFTPCACHE: - # Prune the cache, rather arbitrarily - for k in self.ftpcache.keys(): - if k != key: - v = self.ftpcache[k] - del self.ftpcache[k] - v.close() - try: - if key not in self.ftpcache: - self.ftpcache[key] = \ - ftpwrapper(user, passwd, host, port, dirs) - if not file: type = 'D' - else: type = 'I' - for attr in attrs: - attr, value = splitvalue(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - (fp, retrlen) = self.ftpcache[key].retrfile(file, type) - mtype = mimetypes.guess_type("ftp:" + url)[0] - headers = "" - if mtype: - headers += "Content-Type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-Length: %d\n" % retrlen - headers = email.message_from_string(headers) - return addinfourl(fp, headers, "ftp:" + url) - except ftperrors() as exp: - raise_with_traceback(URLError('ftp error %r' % exp)) - - def open_data(self, url, data=None): - """Use "data" URL.""" - if not isinstance(url, str): - raise URLError('data error: proxy support for data protocol currently not implemented') - # ignore POSTed data - # - # syntax of data URLs: - # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data - # mediatype := [ type "/" subtype ] *( ";" parameter ) - # data := *urlchar - # parameter := attribute "=" value - try: - [type, data] = url.split(',', 1) - except ValueError: - raise IOError('data error', 'bad data URL') - if not type: - type = 'text/plain;charset=US-ASCII' - semi = type.rfind(';') - if semi >= 0 and '=' not in type[semi:]: - encoding = type[semi+1:] - type = type[:semi] - else: - encoding = '' - msg = [] - msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT', - time.gmtime(time.time()))) - msg.append('Content-type: %s' % type) - if encoding == 'base64': - # XXX is this encoding/decoding ok? 
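open_data() implements exactly the grammar quoted in its comment: everything after "data:" splits once on the first comma, and an optional ";base64" suffix on the mediatype selects base64 decoding instead of percent-decoding (the decode branch resumes just below). A self-contained sketch of that parse, with an illustrative function name:

    import base64
    from urllib.parse import unquote

    def parse_data_url(rest):
        # rest is the part after the "data:" scheme prefix
        mediatype, payload = rest.split(",", 1)
        if mediatype.endswith(";base64"):
            return base64.decodebytes(payload.encode("ascii"))
        return unquote(payload)

    assert parse_data_url("text/plain;base64,SGVsbG8=") == b"Hello"
    assert parse_data_url(",hi%20there") == "hi there"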
- data = base64.decodebytes(data.encode('ascii')).decode('latin-1') - else: - data = unquote(data) - msg.append('Content-Length: %d' % len(data)) - msg.append('') - msg.append(data) - msg = '\n'.join(msg) - headers = email.message_from_string(msg) - f = io.StringIO(msg) - #f.fileno = None # needed for addinfourl - return addinfourl(f, headers, url) - - -class FancyURLopener(URLopener): - """Derived class with handlers for errors we can handle (perhaps).""" - - def __init__(self, *args, **kwargs): - URLopener.__init__(self, *args, **kwargs) - self.auth_cache = {} - self.tries = 0 - self.maxtries = 10 - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handling -- don't raise an exception.""" - return addinfourl(fp, headers, "http:" + url, errcode) - - def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): - """Error 302 -- relocated (temporarily).""" - self.tries += 1 - if self.maxtries and self.tries >= self.maxtries: - if hasattr(self, "http_error_500"): - meth = self.http_error_500 - else: - meth = self.http_error_default - self.tries = 0 - return meth(url, fp, 500, - "Internal Server Error: Redirect Recursion", headers) - result = self.redirect_internal(url, fp, errcode, errmsg, headers, - data) - self.tries = 0 - return result - - def redirect_internal(self, url, fp, errcode, errmsg, headers, data): - if 'location' in headers: - newurl = headers['location'] - elif 'uri' in headers: - newurl = headers['uri'] - else: - return - fp.close() - - # In case the server sent a relative URL, join with original: - newurl = urljoin(self.type + ":" + url, newurl) - - urlparts = urlparse(newurl) - - # For security reasons, we don't allow redirection to anything other - # than http, https and ftp. - - # We are using newer HTTPError with older redirect_internal method - # This older method will get deprecated in 3.3 - - if urlparts.scheme not in ('http', 'https', 'ftp', ''): - raise HTTPError(newurl, errcode, - errmsg + - " Redirection to url '%s' is not allowed." % newurl, - headers, fp) - - return self.open(newurl) - - def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): - """Error 301 -- also relocated (permanently).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_303(self, url, fp, errcode, errmsg, headers, data=None): - """Error 303 -- also relocated (essentially identical to 302).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): - """Error 307 -- relocated, but turn POST into error.""" - if data is None: - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - else: - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_401(self, url, fp, errcode, errmsg, headers, data=None, - retry=False): - """Error 401 -- authentication required. 
- This function supports Basic authentication only.""" - if 'www-authenticate' not in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['www-authenticate'] - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - if not retry: - URLopener.http_error_default(self, url, fp, errcode, errmsg, - headers) - name = 'retry_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def http_error_407(self, url, fp, errcode, errmsg, headers, data=None, - retry=False): - """Error 407 -- proxy authentication required. - This function supports Basic authentication only.""" - if 'proxy-authenticate' not in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['proxy-authenticate'] - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - if not retry: - URLopener.http_error_default(self, url, fp, errcode, errmsg, - headers) - name = 'retry_proxy_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def retry_proxy_http_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - newurl = 'http://' + host + selector - proxy = self.proxies['http'] - urltype, proxyhost = splittype(proxy) - proxyhost, proxyselector = splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), proxyhost) - self.proxies['http'] = 'http://' + proxyhost + proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_proxy_https_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - newurl = 'https://' + host + selector - proxy = self.proxies['https'] - urltype, proxyhost = splittype(proxy) - proxyhost, proxyselector = splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), proxyhost) - self.proxies['https'] = 'https://' + proxyhost + proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_http_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, passwd = self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), host) - newurl = 'http://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_https_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, passwd = 
self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = "%s:%s@%s" % (quote(user, safe=''), - quote(passwd, safe=''), host) - newurl = 'https://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def get_user_passwd(self, host, realm, clear_cache=0): - key = realm + '@' + host.lower() - if key in self.auth_cache: - if clear_cache: - del self.auth_cache[key] - else: - return self.auth_cache[key] - user, passwd = self.prompt_user_passwd(host, realm) - if user or passwd: self.auth_cache[key] = (user, passwd) - return user, passwd - - def prompt_user_passwd(self, host, realm): - """Override this in a GUI environment!""" - import getpass - try: - user = input("Enter username for %s at %s: " % (realm, host)) - passwd = getpass.getpass("Enter password for %s in %s at %s: " % - (user, realm, host)) - return user, passwd - except KeyboardInterrupt: - print() - return None, None - - -# Utility functions - -_localhost = None -def localhost(): - """Return the IP address of the magic hostname 'localhost'.""" - global _localhost - if _localhost is None: - _localhost = socket.gethostbyname('localhost') - return _localhost - -_thishost = None -def thishost(): - """Return the IP addresses of the current host.""" - global _thishost - if _thishost is None: - try: - _thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2]) - except socket.gaierror: - _thishost = tuple(socket.gethostbyname_ex('localhost')[2]) - return _thishost - -_ftperrors = None -def ftperrors(): - """Return the set of errors raised by the FTP class.""" - global _ftperrors - if _ftperrors is None: - import ftplib - _ftperrors = ftplib.all_errors - return _ftperrors - -_noheaders = None -def noheaders(): - """Return an empty email Message object.""" - global _noheaders - if _noheaders is None: - _noheaders = email.message_from_string("") - return _noheaders - - -# Utility classes - -class ftpwrapper(object): - """Class used by open_ftp() for cache of open FTP connections.""" - - def __init__(self, user, passwd, host, port, dirs, timeout=None, - persistent=True): - self.user = user - self.passwd = passwd - self.host = host - self.port = port - self.dirs = dirs - self.timeout = timeout - self.refcount = 0 - self.keepalive = persistent - self.init() - - def init(self): - import ftplib - self.busy = 0 - self.ftp = ftplib.FTP() - self.ftp.connect(self.host, self.port, self.timeout) - self.ftp.login(self.user, self.passwd) - _target = '/'.join(self.dirs) - self.ftp.cwd(_target) - - def retrfile(self, file, type): - import ftplib - self.endtransfer() - if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 - else: cmd = 'TYPE ' + type; isdir = 0 - try: - self.ftp.voidcmd(cmd) - except ftplib.all_errors: - self.init() - self.ftp.voidcmd(cmd) - conn = None - if file and not isdir: - # Try to retrieve as a file - try: - cmd = 'RETR ' + file - conn, retrlen = self.ftp.ntransfercmd(cmd) - except ftplib.error_perm as reason: - if str(reason)[:3] != '550': - raise_with_traceback(URLError('ftp error: %r' % reason)) - if not conn: - # Set transfer mode to ASCII! - self.ftp.voidcmd('TYPE A') - # Try a directory listing. Verify that directory exists. 
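retrfile() above first attempts a binary RETR and, on a 550 permanent error, falls back to an ASCII directory listing (the listing branch continues just below). The same fallback written against plain ftplib, as a hedged sketch with placeholder host and path:

    import ftplib

    def fetch(host, path):
        ftp = ftplib.FTP(host)
        ftp.login()                    # anonymous login
        try:
            ftp.voidcmd("TYPE I")      # binary transfer, like type 'I' above
            conn, size = ftp.ntransfercmd("RETR " + path)
        except ftplib.error_perm as reason:
            if not str(reason).startswith("550"):
                raise
            ftp.voidcmd("TYPE A")      # listings are ASCII
            conn, size = ftp.ntransfercmd("LIST " + path)
        return conn.makefile("rb"), size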
- if file: - pwd = self.ftp.pwd() - try: - try: - self.ftp.cwd(file) - except ftplib.error_perm as reason: - ### Was: - # raise URLError('ftp error: %r' % reason) from reason - exc = URLError('ftp error: %r' % reason) - exc.__cause__ = reason - raise exc - finally: - self.ftp.cwd(pwd) - cmd = 'LIST ' + file - else: - cmd = 'LIST' - conn, retrlen = self.ftp.ntransfercmd(cmd) - self.busy = 1 - - ftpobj = addclosehook(conn.makefile('rb'), self.file_close) - self.refcount += 1 - conn.close() - # Pass back both a suitably decorated object and a retrieval length - return (ftpobj, retrlen) - - def endtransfer(self): - self.busy = 0 - - def close(self): - self.keepalive = False - if self.refcount <= 0: - self.real_close() - - def file_close(self): - self.endtransfer() - self.refcount -= 1 - if self.refcount <= 0 and not self.keepalive: - self.real_close() - - def real_close(self): - self.endtransfer() - try: - self.ftp.close() - except ftperrors(): - pass - -# Proxy handling -def getproxies_environment(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Scan the environment for variables named _proxy; - this seems to be the standard convention. If you need a - different way, you can pass a proxies dictionary to the - [Fancy]URLopener constructor. - - """ - proxies = {} - for name, value in os.environ.items(): - name = name.lower() - if value and name[-6:] == '_proxy': - proxies[name[:-6]] = value - return proxies - -def proxy_bypass_environment(host): - """Test if proxies should not be used for a particular host. - - Checks the environment for a variable named no_proxy, which should - be a list of DNS suffixes separated by commas, or '*' for all hosts. - """ - no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '') - # '*' is special case for always bypass - if no_proxy == '*': - return 1 - # strip port off host - hostonly, port = splitport(host) - # check if the host ends with any of the DNS suffixes - no_proxy_list = [proxy.strip() for proxy in no_proxy.split(',')] - for name in no_proxy_list: - if name and (hostonly.endswith(name) or host.endswith(name)): - return 1 - # otherwise, don't bypass - return 0 - - -# This code tests an OSX specific data structure but is testable on all -# platforms -def _proxy_bypass_macosx_sysconf(host, proxy_settings): - """ - Return True iff this host shouldn't be accessed using a proxy - - This function uses the MacOSX framework SystemConfiguration - to fetch the proxy information. - - proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: - { 'exclude_simple': bool, - 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] - } - """ - from fnmatch import fnmatch - - hostonly, port = splitport(host) - - def ip2num(ipAddr): - parts = ipAddr.split('.') - parts = list(map(int, parts)) - if len(parts) != 4: - parts = (parts + [0, 0, 0, 0])[:4] - return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] - - # Check for simple host names: - if '.' 
not in host: - if proxy_settings['exclude_simple']: - return True - - hostIP = None - - for value in proxy_settings.get('exceptions', ()): - # Items in the list are strings like these: *.local, 169.254/16 - if not value: continue - - m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) - if m is not None: - if hostIP is None: - try: - hostIP = socket.gethostbyname(hostonly) - hostIP = ip2num(hostIP) - except socket.error: - continue - - base = ip2num(m.group(1)) - mask = m.group(2) - if mask is None: - mask = 8 * (m.group(1).count('.') + 1) - else: - mask = int(mask[1:]) - mask = 32 - mask - - if (hostIP >> mask) == (base >> mask): - return True - - elif fnmatch(host, value): - return True - - return False - - -if sys.platform == 'darwin': - from _scproxy import _get_proxy_settings, _get_proxies - - def proxy_bypass_macosx_sysconf(host): - proxy_settings = _get_proxy_settings() - return _proxy_bypass_macosx_sysconf(host, proxy_settings) - - def getproxies_macosx_sysconf(): - """Return a dictionary of scheme -> proxy server URL mappings. - - This function uses the MacOSX framework SystemConfiguration - to fetch the proxy information. - """ - return _get_proxies() - - - - def proxy_bypass(host): - if getproxies_environment(): - return proxy_bypass_environment(host) - else: - return proxy_bypass_macosx_sysconf(host) - - def getproxies(): - return getproxies_environment() or getproxies_macosx_sysconf() - - -elif os.name == 'nt': - def getproxies_registry(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Win32 uses the registry to store proxies. - - """ - proxies = {} - try: - import winreg - except ImportError: - # Std module, so should be around - but you never know! - return proxies - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - proxyEnable = winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0] - if proxyEnable: - # Returned as Unicode but problems if not converted to ASCII - proxyServer = str(winreg.QueryValueEx(internetSettings, - 'ProxyServer')[0]) - if '=' in proxyServer: - # Per-protocol settings - for p in proxyServer.split(';'): - protocol, address = p.split('=', 1) - # See if address has a type:// prefix - if not re.match('^([^/:]+)://', address): - address = '%s://%s' % (protocol, address) - proxies[protocol] = address - else: - # Use one setting for all protocols - if proxyServer[:5] == 'http:': - proxies['http'] = proxyServer - else: - proxies['http'] = 'http://%s' % proxyServer - proxies['https'] = 'https://%s' % proxyServer - proxies['ftp'] = 'ftp://%s' % proxyServer - internetSettings.Close() - except (WindowsError, ValueError, TypeError): - # Either registry key not found etc, or the value in an - # unexpected format. - # proxies already set up to be empty so nothing to do - pass - return proxies - - def getproxies(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Returns settings gathered from the environment, if specified, - or the registry. - - """ - return getproxies_environment() or getproxies_registry() - - def proxy_bypass_registry(host): - try: - import winreg - except ImportError: - # Std modules, so should be around - but you never know! 
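Returning to the CIDR-style exception test in _proxy_bypass_macosx_sysconf above: for an exception like "10.1" with no explicit mask, the mask is inferred as 8 bits per dotted component, and the comparison right-shifts both addresses by the remaining bits. A worked example (helper name illustrative):

    def ip2num(s):
        p = (list(map(int, s.split("."))) + [0, 0, 0, 0])[:4]
        return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]

    base = ip2num("10.1")            # pads to 10.1.0.0
    mask = 32 - 8 * 2                # two components -> /16 -> shift by 16
    assert (ip2num("10.1.42.7") >> mask) == (base >> mask)   # bypassed
    assert (ip2num("10.2.0.1") >> mask) != (base >> mask)    # not bypassed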
- return 0 - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - proxyEnable = winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0] - proxyOverride = str(winreg.QueryValueEx(internetSettings, - 'ProxyOverride')[0]) - # ^^^^ Returned as Unicode but problems if not converted to ASCII - except WindowsError: - return 0 - if not proxyEnable or not proxyOverride: - return 0 - # try to make a host list from name and IP address. - rawHost, port = splitport(host) - host = [rawHost] - try: - addr = socket.gethostbyname(rawHost) - if addr != rawHost: - host.append(addr) - except socket.error: - pass - try: - fqdn = socket.getfqdn(rawHost) - if fqdn != rawHost: - host.append(fqdn) - except socket.error: - pass - # make a check value list from the registry entry: replace the - # '' string by the localhost entry and the corresponding - # canonical entry. - proxyOverride = proxyOverride.split(';') - # now check if we match one of the registry values. - for test in proxyOverride: - if test == '': - if '.' not in rawHost: - return 1 - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char - for val in host: - if re.match(test, val, re.I): - return 1 - return 0 - - def proxy_bypass(host): - """Return a dictionary of scheme -> proxy server URL mappings. - - Returns settings gathered from the environment, if specified, - or the registry. - - """ - if getproxies_environment(): - return proxy_bypass_environment(host) - else: - return proxy_bypass_registry(host) - -else: - # By default use environment variables - getproxies = getproxies_environment - proxy_bypass = proxy_bypass_environment diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/response.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/response.py deleted file mode 100755 index adbf6e5..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/response.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Response classes used by urllib. - -The base class, addbase, defines a minimal file-like interface, -including read() and readline(). The typical response object is an -addinfourl instance, which defines an info() method that returns -headers and a geturl() method that returns the url. -""" -from __future__ import absolute_import, division, unicode_literals -from future.builtins import object - -class addbase(object): - """Base class for addinfo and addclosehook.""" - - # XXX Add a method to expose the timeout on the underlying socket? - - def __init__(self, fp): - # TODO(jhylton): Is there a better way to delegate using io? - self.fp = fp - self.read = self.fp.read - self.readline = self.fp.readline - # TODO(jhylton): Make sure an object with readlines() is also iterable - if hasattr(self.fp, "readlines"): - self.readlines = self.fp.readlines - if hasattr(self.fp, "fileno"): - self.fileno = self.fp.fileno - else: - self.fileno = lambda: None - - def __iter__(self): - # Assigning `__iter__` to the instance doesn't work as intended - # because the iter builtin does something like `cls.__iter__(obj)` - # and thus fails to find the _bound_ method `obj.__iter__`. - # Returning just `self.fp` works for built-in file objects but - # might not work for general file-like objects. 
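The ProxyOverride matching above turns each registry glob into a regular expression (dots escaped first, then * and ? converted) before testing every spelling of the host. A compact sketch of that conversion:

    import re

    def override_matches(pattern, host):
        test = pattern.replace(".", r"\.").replace("*", r".*").replace("?", r".")
        return re.match(test, host, re.I) is not None

    assert override_matches("*.example.com", "www.example.com")
    assert not override_matches("*.example.com", "example.org")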
- return iter(self.fp) - - def __repr__(self): - return '<%s at %r whose fp = %r>' % (self.__class__.__name__, - id(self), self.fp) - - def close(self): - if self.fp: - self.fp.close() - self.fp = None - self.read = None - self.readline = None - self.readlines = None - self.fileno = None - self.__iter__ = None - self.__next__ = None - - def __enter__(self): - if self.fp is None: - raise ValueError("I/O operation on closed file") - return self - - def __exit__(self, type, value, traceback): - self.close() - -class addclosehook(addbase): - """Class to add a close hook to an open file.""" - - def __init__(self, fp, closehook, *hookargs): - addbase.__init__(self, fp) - self.closehook = closehook - self.hookargs = hookargs - - def close(self): - if self.closehook: - self.closehook(*self.hookargs) - self.closehook = None - self.hookargs = None - addbase.close(self) - -class addinfo(addbase): - """class to add an info() method to an open file.""" - - def __init__(self, fp, headers): - addbase.__init__(self, fp) - self.headers = headers - - def info(self): - return self.headers - -class addinfourl(addbase): - """class to add info() and geturl() methods to an open file.""" - - def __init__(self, fp, headers, url, code=None): - addbase.__init__(self, fp) - self.headers = headers - self.url = url - self.code = code - - def info(self): - return self.headers - - def getcode(self): - return self.code - - def geturl(self): - return self.url - -del absolute_import, division, unicode_literals, object diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/robotparser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/robotparser.py deleted file mode 100755 index a0f3651..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/robotparser.py +++ /dev/null @@ -1,211 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from future.builtins import str -""" robotparser.py - - Copyright (C) 2000 Bastian Kleineidam - - You can choose between two licenses when using this package: - 1) GNU GPLv2 - 2) PSF license for Python 2.2 - - The robots.txt Exclusion Protocol is implemented as specified in - http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html -""" - -# Was: import urllib.parse, urllib.request -from future.backports import urllib -from future.backports.urllib import parse as _parse, request as _request -urllib.parse = _parse -urllib.request = _request - - -__all__ = ["RobotFileParser"] - -class RobotFileParser(object): - """ This class provides a set of methods to read, parse and answer - questions about a single robots.txt file. - - """ - - def __init__(self, url=''): - self.entries = [] - self.default_entry = None - self.disallow_all = False - self.allow_all = False - self.set_url(url) - self.last_checked = 0 - - def mtime(self): - """Returns the time the robots.txt file was last fetched. - - This is useful for long-running web spiders that need to - check for new robots.txt files periodically. - - """ - return self.last_checked - - def modified(self): - """Sets the time the robots.txt file was last fetched to the - current time. 
- - """ - import time - self.last_checked = time.time() - - def set_url(self, url): - """Sets the URL referring to a robots.txt file.""" - self.url = url - self.host, self.path = urllib.parse.urlparse(url)[1:3] - - def read(self): - """Reads the robots.txt URL and feeds it to the parser.""" - try: - f = urllib.request.urlopen(self.url) - except urllib.error.HTTPError as err: - if err.code in (401, 403): - self.disallow_all = True - elif err.code >= 400: - self.allow_all = True - else: - raw = f.read() - self.parse(raw.decode("utf-8").splitlines()) - - def _add_entry(self, entry): - if "*" in entry.useragents: - # the default entry is considered last - if self.default_entry is None: - # the first default entry wins - self.default_entry = entry - else: - self.entries.append(entry) - - def parse(self, lines): - """Parse the input lines from a robots.txt file. - - We allow that a user-agent: line is not preceded by - one or more blank lines. - """ - # states: - # 0: start state - # 1: saw user-agent line - # 2: saw an allow or disallow line - state = 0 - entry = Entry() - - for line in lines: - if not line: - if state == 1: - entry = Entry() - state = 0 - elif state == 2: - self._add_entry(entry) - entry = Entry() - state = 0 - # remove optional comment and strip line - i = line.find('#') - if i >= 0: - line = line[:i] - line = line.strip() - if not line: - continue - line = line.split(':', 1) - if len(line) == 2: - line[0] = line[0].strip().lower() - line[1] = urllib.parse.unquote(line[1].strip()) - if line[0] == "user-agent": - if state == 2: - self._add_entry(entry) - entry = Entry() - entry.useragents.append(line[1]) - state = 1 - elif line[0] == "disallow": - if state != 0: - entry.rulelines.append(RuleLine(line[1], False)) - state = 2 - elif line[0] == "allow": - if state != 0: - entry.rulelines.append(RuleLine(line[1], True)) - state = 2 - if state == 2: - self._add_entry(entry) - - - def can_fetch(self, useragent, url): - """using the parsed robots.txt decide if useragent can fetch url""" - if self.disallow_all: - return False - if self.allow_all: - return True - # search for given user agent matches - # the first match counts - parsed_url = urllib.parse.urlparse(urllib.parse.unquote(url)) - url = urllib.parse.urlunparse(('','',parsed_url.path, - parsed_url.params,parsed_url.query, parsed_url.fragment)) - url = urllib.parse.quote(url) - if not url: - url = "/" - for entry in self.entries: - if entry.applies_to(useragent): - return entry.allowance(url) - # try the default entry last - if self.default_entry: - return self.default_entry.allowance(url) - # agent not found ==> access granted - return True - - def __str__(self): - return ''.join([str(entry) + "\n" for entry in self.entries]) - - -class RuleLine(object): - """A rule line is a single "Allow:" (allowance==True) or "Disallow:" - (allowance==False) followed by a path.""" - def __init__(self, path, allowance): - if path == '' and not allowance: - # an empty value means allow all - allowance = True - self.path = urllib.parse.quote(path) - self.allowance = allowance - - def applies_to(self, filename): - return self.path == "*" or filename.startswith(self.path) - - def __str__(self): - return (self.allowance and "Allow" or "Disallow") + ": " + self.path - - -class Entry(object): - """An entry has one or more user-agents and zero or more rulelines""" - def __init__(self): - self.useragents = [] - self.rulelines = [] - - def __str__(self): - ret = [] - for agent in self.useragents: - ret.extend(["User-agent: ", agent, "\n"]) - for line 
in self.rulelines: - ret.extend([str(line), "\n"]) - return ''.join(ret) - - def applies_to(self, useragent): - """check if this entry applies to the specified agent""" - # split the name token and make it lower case - useragent = useragent.split("/")[0].lower() - for agent in self.useragents: - if agent == '*': - # we have the catch-all agent - return True - agent = agent.lower() - if agent in useragent: - return True - return False - - def allowance(self, filename): - """Preconditions: - - our agent applies to this entry - - filename is URL decoded""" - for line in self.rulelines: - if line.applies_to(filename): - return line.allowance - return True diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/__init__.py deleted file mode 100755 index 196d378..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# This directory is a Python package. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/client.py deleted file mode 100755 index b78e5ba..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/client.py +++ /dev/null @@ -1,1496 +0,0 @@ -# -# XML-RPC CLIENT LIBRARY -# $Id$ -# -# an XML-RPC client interface for Python. -# -# the marshalling and response parser code can also be used to -# implement XML-RPC servers. -# -# Notes: -# this version is designed to work with Python 2.1 or newer. -# -# History: -# 1999-01-14 fl Created -# 1999-01-15 fl Changed dateTime to use localtime -# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service -# 1999-01-19 fl Fixed array data element (from Skip Montanaro) -# 1999-01-21 fl Fixed dateTime constructor, etc. -# 1999-02-02 fl Added fault handling, handle empty sequences, etc. 
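Before continuing into the XML-RPC client: the RobotFileParser removed above resolves rules first-match-wins, so narrower Allow lines must precede broader Disallow lines. A usage sketch driving parse() directly, using the modern urllib.robotparser class that exposes the same API (agent name illustrative):

    from urllib.robotparser import RobotFileParser

    rp = RobotFileParser()
    rp.modified()        # mark as fetched so can_fetch() consults the rules
    rp.parse([
        "User-agent: *",
        "Allow: /private/public.html",
        "Disallow: /private/",
    ])
    assert rp.can_fetch("AnyBot", "/private/public.html")
    assert not rp.can_fetch("AnyBot", "/private/secret.html")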
-# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro) -# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8) -# 2000-11-28 fl Changed boolean to check the truth value of its argument -# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches -# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1) -# 2001-03-28 fl Make sure response tuple is a singleton -# 2001-03-29 fl Don't require empty params element (from Nicholas Riley) -# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2) -# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod) -# 2001-09-03 fl Allow Transport subclass to override getparser -# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup) -# 2001-10-01 fl Remove containers from memo cache when done with them -# 2001-10-01 fl Use faster escape method (80% dumps speedup) -# 2001-10-02 fl More dumps microtuning -# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum) -# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow -# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems) -# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix) -# 2002-03-17 fl Avoid buffered read when possible (from James Rucker) -# 2002-04-07 fl Added pythondoc comments -# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers -# 2002-05-15 fl Added error constants (from Andrew Kuchling) -# 2002-06-27 fl Merged with Python CVS version -# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby) -# 2003-01-22 sm Add support for the bool type -# 2003-02-27 gvr Remove apply calls -# 2003-04-24 sm Use cStringIO if available -# 2003-04-25 ak Add support for nil -# 2003-06-15 gn Add support for time.struct_time -# 2003-07-12 gp Correct marshalling of Faults -# 2003-10-31 mvl Add multicall support -# 2004-08-20 mvl Bump minimum supported Python version to 2.1 -# -# Copyright (c) 1999-2002 by Secret Labs AB. -# Copyright (c) 1999-2002 by Fredrik Lundh. -# -# info@pythonware.com -# http://www.pythonware.com -# -# -------------------------------------------------------------------- -# The XML-RPC client interface is -# -# Copyright (c) 1999-2002 by Secret Labs AB -# Copyright (c) 1999-2002 by Fredrik Lundh -# -# By obtaining, using, and/or copying this software and/or its -# associated documentation, you agree that you have read, understood, -# and will comply with the following terms and conditions: -# -# Permission to use, copy, modify, and distribute this software and -# its associated documentation for any purpose and without fee is -# hereby granted, provided that the above copyright notice appears in -# all copies, and that both that copyright notice and this permission -# notice appear in supporting documentation, and that the name of -# Secret Labs AB or the author not be used in advertising or publicity -# pertaining to distribution of the software without specific, written -# prior permission. -# -# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD -# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- -# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR -# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY -# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE -# OF THIS SOFTWARE. 
-# -------------------------------------------------------------------- - -""" -Ported using Python-Future from the Python 3.3 standard library. - -An XML-RPC client interface for Python. - -The marshalling and response parser code can also be used to -implement XML-RPC servers. - -Exported exceptions: - - Error Base class for client errors - ProtocolError Indicates an HTTP protocol error - ResponseError Indicates a broken response package - Fault Indicates an XML-RPC fault package - -Exported classes: - - ServerProxy Represents a logical connection to an XML-RPC server - - MultiCall Executor of boxcared xmlrpc requests - DateTime dateTime wrapper for an ISO 8601 string or time tuple or - localtime integer value to generate a "dateTime.iso8601" - XML-RPC value - Binary binary data wrapper - - Marshaller Generate an XML-RPC params chunk from a Python data structure - Unmarshaller Unmarshal an XML-RPC response from incoming XML event message - Transport Handles an HTTP transaction to an XML-RPC server - SafeTransport Handles an HTTPS transaction to an XML-RPC server - -Exported constants: - - (none) - -Exported functions: - - getparser Create instance of the fastest available parser & attach - to an unmarshalling object - dumps Convert an argument tuple or a Fault instance to an XML-RPC - request (or response, if the methodresponse option is used). - loads Convert an XML-RPC packet to unmarshalled data plus a method - name (None if not present). -""" - -from __future__ import (absolute_import, division, print_function, - unicode_literals) -from future.builtins import bytes, dict, int, range, str - -import base64 -# Py2.7 compatibility hack -base64.encodebytes = base64.encodestring -base64.decodebytes = base64.decodestring -import sys -import time -from datetime import datetime -from future.backports.http import client as http_client -from future.backports.urllib import parse as urllib_parse -from future.utils import ensure_new_type -from xml.parsers import expat -import socket -import errno -from io import BytesIO -try: - import gzip -except ImportError: - gzip = None #python can be built without zlib/gzip support - -# -------------------------------------------------------------------- -# Internal stuff - -def escape(s): - s = s.replace("&", "&amp;") - s = s.replace("<", "&lt;") - return s.replace(">", "&gt;",) - -# used in User-Agent header sent -__version__ = sys.version[:3] - -# xmlrpc integer limits -MAXINT = 2**31-1 -MININT = -2**31 - -# -------------------------------------------------------------------- -# Error constants (from Dan Libby's specification at -# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php) - -# Ranges of errors -PARSE_ERROR = -32700 -SERVER_ERROR = -32600 -APPLICATION_ERROR = -32500 -SYSTEM_ERROR = -32400 -TRANSPORT_ERROR = -32300 - -# Specific errors -NOT_WELLFORMED_ERROR = -32700 -UNSUPPORTED_ENCODING = -32701 -INVALID_ENCODING_CHAR = -32702 -INVALID_XMLRPC = -32600 -METHOD_NOT_FOUND = -32601 -INVALID_METHOD_PARAMS = -32602 -INTERNAL_ERROR = -32603 - -# -------------------------------------------------------------------- -# Exceptions - -## -# Base class for all kinds of client-side errors. - -class Error(Exception): - """Base class for client errors.""" - def __str__(self): - return repr(self) - -## -# Indicates an HTTP-level protocol error. This is raised by the HTTP -# transport layer, if the server returns an error code other than 200 -# (OK). -# -# @param url The target URL. -# @param errcode The HTTP error code. -# @param errmsg The HTTP error message.
-# @param headers The HTTP header dictionary. - -class ProtocolError(Error): - """Indicates an HTTP protocol error.""" - def __init__(self, url, errcode, errmsg, headers): - Error.__init__(self) - self.url = url - self.errcode = errcode - self.errmsg = errmsg - self.headers = headers - def __repr__(self): - return ( - "<ProtocolError for %s: %s %s>" % - (self.url, self.errcode, self.errmsg) - ) - -## -# Indicates a broken XML-RPC response package. This exception is -# raised by the unmarshalling layer, if the XML-RPC response is -# malformed. - -class ResponseError(Error): - """Indicates a broken response package.""" - pass - -## -# Indicates an XML-RPC fault response package. This exception is -# raised by the unmarshalling layer, if the XML-RPC response contains -# a fault string. This exception can also be used as a class, to -# generate a fault XML-RPC message. -# -# @param faultCode The XML-RPC fault code. -# @param faultString The XML-RPC fault string. - -class Fault(Error): - """Indicates an XML-RPC fault package.""" - def __init__(self, faultCode, faultString, **extra): - Error.__init__(self) - self.faultCode = faultCode - self.faultString = faultString - def __repr__(self): - return "<Fault %s: %r>" % (ensure_new_type(self.faultCode), - ensure_new_type(self.faultString)) - -# -------------------------------------------------------------------- -# Special values - -## -# Backwards compatibility - -boolean = Boolean = bool - -## -# Wrapper for XML-RPC DateTime values. This converts a time value to -# the format used by XML-RPC. -# <p>
-# The value can be given as a datetime object, as a string in the -# format "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by -# time.localtime()), or an integer value (as returned by time.time()). -# The wrapper uses time.localtime() to convert an integer to a time -# tuple. -# -# @param value The time, given as a datetime object, an ISO 8601 string, -# a time tuple, or an integer time value. - - -### For Python-Future: -def _iso8601_format(value): - return "%04d%02d%02dT%02d:%02d:%02d" % ( - value.year, value.month, value.day, - value.hour, value.minute, value.second) -### -# Issue #13305: different format codes across platforms -# _day0 = datetime(1, 1, 1) -# if _day0.strftime('%Y') == '0001': # Mac OS X -# def _iso8601_format(value): -# return value.strftime("%Y%m%dT%H:%M:%S") -# elif _day0.strftime('%4Y') == '0001': # Linux -# def _iso8601_format(value): -# return value.strftime("%4Y%m%dT%H:%M:%S") -# else: -# def _iso8601_format(value): -# return value.strftime("%Y%m%dT%H:%M:%S").zfill(17) -# del _day0 - - -def _strftime(value): - if isinstance(value, datetime): - return _iso8601_format(value) - - if not isinstance(value, (tuple, time.struct_time)): - if value == 0: - value = time.time() - value = time.localtime(value) - - return "%04d%02d%02dT%02d:%02d:%02d" % value[:6] - -class DateTime(object): - """DateTime wrapper for an ISO 8601 string or time tuple or - localtime integer value to generate 'dateTime.iso8601' XML-RPC - value. - """ - - def __init__(self, value=0): - if isinstance(value, str): - self.value = value - else: - self.value = _strftime(value) - - def make_comparable(self, other): - if isinstance(other, DateTime): - s = self.value - o = other.value - elif isinstance(other, datetime): - s = self.value - o = _iso8601_format(other) - elif isinstance(other, str): - s = self.value - o = other - elif hasattr(other, "timetuple"): - s = self.timetuple() - o = other.timetuple() - else: - otype = (hasattr(other, "__class__") - and other.__class__.__name__ - or type(other)) - raise TypeError("Can't compare %s and %s" % - (self.__class__.__name__, otype)) - return s, o - - def __lt__(self, other): - s, o = self.make_comparable(other) - return s < o - - def __le__(self, other): - s, o = self.make_comparable(other) - return s <= o - - def __gt__(self, other): - s, o = self.make_comparable(other) - return s > o - - def __ge__(self, other): - s, o = self.make_comparable(other) - return s >= o - - def __eq__(self, other): - s, o = self.make_comparable(other) - return s == o - - def __ne__(self, other): - s, o = self.make_comparable(other) - return s != o - - def timetuple(self): - return time.strptime(self.value, "%Y%m%dT%H:%M:%S") - - ## - # Get date/time value. - # - # @return Date/time value, as an ISO 8601 string. - - def __str__(self): - return self.value - - def __repr__(self): - return "<DateTime %r at %x>" % (ensure_new_type(self.value), id(self)) - - def decode(self, data): - self.value = str(data).strip() - - def encode(self, out): - out.write("<dateTime.iso8601>") - out.write(self.value) - out.write("</dateTime.iso8601>\n") - -def _datetime(data): - # decode xml element contents into a DateTime structure. - value = DateTime() - value.decode(data) - return value - -def _datetime_type(data): - return datetime.strptime(data, "%Y%m%dT%H:%M:%S") - -## -# Wrapper for binary data. This can be used to transport any kind -# of binary data over XML-RPC, using BASE64 encoding. -# -# @param data An 8-bit string containing arbitrary data.
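Before the Binary wrapper below, a quick self-contained check of the compact shape yyyymmddThh:mm:ss that _strftime() and _iso8601_format() above always emit, and that DateTime.timetuple() parses back:

    import time
    from datetime import datetime

    def iso8601(value):
        # same shape as _iso8601_format() above
        return "%04d%02d%02dT%02d:%02d:%02d" % (
            value.year, value.month, value.day,
            value.hour, value.minute, value.second)

    s = iso8601(datetime(1999, 1, 14, 8, 30, 0))
    assert s == "19990114T08:30:00"
    assert time.strptime(s, "%Y%m%dT%H:%M:%S").tm_hour == 8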
- -class Binary(object): - """Wrapper for binary data.""" - - def __init__(self, data=None): - if data is None: - data = b"" - else: - if not isinstance(data, (bytes, bytearray)): - raise TypeError("expected bytes or bytearray, not %s" % - data.__class__.__name__) - data = bytes(data) # Make a copy of the bytes! - self.data = data - - ## - # Get buffer contents. - # - # @return Buffer contents, as an 8-bit string. - - def __str__(self): - return str(self.data, "latin-1") # XXX encoding?! - - def __eq__(self, other): - if isinstance(other, Binary): - other = other.data - return self.data == other - - def __ne__(self, other): - if isinstance(other, Binary): - other = other.data - return self.data != other - - def decode(self, data): - self.data = base64.decodebytes(data) - - def encode(self, out): - out.write("<base64>\n") - encoded = base64.encodebytes(self.data) - out.write(encoded.decode('ascii')) - out.write("</base64>\n") - -def _binary(data): - # decode xml element contents into a Binary structure - value = Binary() - value.decode(data) - return value - -WRAPPERS = (DateTime, Binary) - -# -------------------------------------------------------------------- -# XML parsers - -class ExpatParser(object): - # fast expat parser for Python 2.0 and later. - def __init__(self, target): - self._parser = parser = expat.ParserCreate(None, None) - self._target = target - parser.StartElementHandler = target.start - parser.EndElementHandler = target.end - parser.CharacterDataHandler = target.data - encoding = None - target.xml(encoding, None) - - def feed(self, data): - self._parser.Parse(data, 0) - - def close(self): - self._parser.Parse("", 1) # end of data - del self._target, self._parser # get rid of circular references - -# -------------------------------------------------------------------- -# XML-RPC marshalling and unmarshalling code - -## -# XML-RPC marshaller. -# -# @param encoding Default encoding for 8-bit strings. The default -# value is None (interpreted as UTF-8). -# @see dumps - -class Marshaller(object): - """Generate an XML-RPC params chunk from a Python data structure. - - Create a Marshaller instance for each set of parameters, and use - the "dumps" method to convert your data (represented as a tuple) - to an XML-RPC params chunk. To write a fault response, pass a - Fault instance instead. You may prefer to use the "dumps" module - function for this purpose. - """ - - # by the way, if you don't understand what's going on in here, - # that's perfectly ok. - - def __init__(self, encoding=None, allow_none=False): - self.memo = {} - self.data = None - self.encoding = encoding - self.allow_none = allow_none - - dispatch = {} - - def dumps(self, values): - out = [] - write = out.append - dump = self.__dump - if isinstance(values, Fault): - # fault instance - write("<fault>\n") - dump({'faultCode': values.faultCode, - 'faultString': values.faultString}, - write) - write("</fault>\n") - else: - # parameter block - # FIXME: the xml-rpc specification allows us to leave out - # the entire <params> block if there are no parameters. - # however, changing this may break older code (including - # old versions of xmlrpclib.py), so this is better left as - # is for now. See @XMLRPC3 for more information. /F
- write("<params>\n") - for v in values: - write("<param>\n") - dump(v, write) - write("</param>\n") - write("</params>\n") - result = "".join(out) - return str(result) - - def __dump(self, value, write): - try: - f = self.dispatch[type(ensure_new_type(value))] - except KeyError: - # check if this object can be marshalled as a structure - if not hasattr(value, '__dict__'): - raise TypeError("cannot marshal %s objects" % type(value)) - # check if this class is a sub-class of a basic type, - # because we don't know how to marshal these types - # (e.g. a string sub-class) - for type_ in type(value).__mro__: - if type_ in self.dispatch.keys(): - raise TypeError("cannot marshal %s objects" % type(value)) - # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix - # for the p3yk merge, this should probably be fixed more neatly. - f = self.dispatch["_arbitrary_instance"] - f(self, value, write) - - def dump_nil (self, value, write): - if not self.allow_none: - raise TypeError("cannot marshal None unless allow_none is enabled") - write("<nil/>") - dispatch[type(None)] = dump_nil - - def dump_bool(self, value, write): - write("<value><boolean>") - write(value and "1" or "0") - write("</boolean></value>\n") - dispatch[bool] = dump_bool - - def dump_long(self, value, write): - if value > MAXINT or value < MININT: - raise OverflowError("long int exceeds XML-RPC limits") - write("<value><int>") - write(str(int(value))) - write("</int></value>\n") - dispatch[int] = dump_long - - # backward compatible - dump_int = dump_long - - def dump_double(self, value, write): - write("<value><double>") - write(repr(ensure_new_type(value))) - write("</double></value>\n") - dispatch[float] = dump_double - - def dump_unicode(self, value, write, escape=escape): - write("<value><string>") - write(escape(value)) - write("</string></value>\n") - dispatch[str] = dump_unicode - - def dump_bytes(self, value, write): - write("<value><base64>\n") - encoded = base64.encodebytes(value) - write(encoded.decode('ascii')) - write("</base64></value>\n") - dispatch[bytes] = dump_bytes - dispatch[bytearray] = dump_bytes - - def dump_array(self, value, write): - i = id(value) - if i in self.memo: - raise TypeError("cannot marshal recursive sequences") - self.memo[i] = None - dump = self.__dump - write("<value><array><data>\n") - for v in value: - dump(v, write) - write("</data></array></value>\n") - del self.memo[i] - dispatch[tuple] = dump_array - dispatch[list] = dump_array - - def dump_struct(self, value, write, escape=escape): - i = id(value) - if i in self.memo: - raise TypeError("cannot marshal recursive dictionaries") - self.memo[i] = None - dump = self.__dump - write("<value><struct>\n") - for k, v in value.items(): - write("<member>\n") - if not isinstance(k, str): - raise TypeError("dictionary key must be string") - write("<name>%s</name>\n" % escape(k)) - dump(v, write) - write("</member>\n") - write("</struct></value>\n") - del self.memo[i] - dispatch[dict] = dump_struct - - def dump_datetime(self, value, write): - write("<value><dateTime.iso8601>") - write(_strftime(value)) - write("</dateTime.iso8601></value>\n") - dispatch[datetime] = dump_datetime - - def dump_instance(self, value, write): - # check for special wrappers - if value.__class__ in WRAPPERS: - self.write = write - value.encode(self) - del self.write - else: - # store instance attributes as a struct (really?) - self.dump_struct(value.__dict__, write) - dispatch[DateTime] = dump_instance - dispatch[Binary] = dump_instance - # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix - # for the p3yk merge, this should probably be fixed more neatly. - dispatch["_arbitrary_instance"] = dump_instance - -## -# XML-RPC unmarshaller. -# -# @see loads - -class Unmarshaller(object): - """Unmarshal an XML-RPC response, based on incoming XML event - messages (start, data, end).
Call close() to get the resulting - data structure. - - Note that this reader is fairly tolerant, and gladly accepts bogus - XML-RPC data without complaining (but not bogus XML). - """ - - # and again, if you don't understand what's going on in here, - # that's perfectly ok. - - def __init__(self, use_datetime=False, use_builtin_types=False): - self._type = None - self._stack = [] - self._marks = [] - self._data = [] - self._methodname = None - self._encoding = "utf-8" - self.append = self._stack.append - self._use_datetime = use_builtin_types or use_datetime - self._use_bytes = use_builtin_types - - def close(self): - # return response tuple and target method - if self._type is None or self._marks: - raise ResponseError() - if self._type == "fault": - raise Fault(**self._stack[0]) - return tuple(self._stack) - - def getmethodname(self): - return self._methodname - - # - # event handlers - - def xml(self, encoding, standalone): - self._encoding = encoding - # FIXME: assert standalone == 1 ??? - - def start(self, tag, attrs): - # prepare to handle this element - if tag == "array" or tag == "struct": - self._marks.append(len(self._stack)) - self._data = [] - self._value = (tag == "value") - - def data(self, text): - self._data.append(text) - - def end(self, tag): - # call the appropriate end tag handler - try: - f = self.dispatch[tag] - except KeyError: - pass # unknown tag ? - else: - return f(self, "".join(self._data)) - - # - # accelerator support - - def end_dispatch(self, tag, data): - # dispatch data - try: - f = self.dispatch[tag] - except KeyError: - pass # unknown tag ? - else: - return f(self, data) - - # - # element decoders - - dispatch = {} - - def end_nil (self, data): - self.append(None) - self._value = 0 - dispatch["nil"] = end_nil - - def end_boolean(self, data): - if data == "0": - self.append(False) - elif data == "1": - self.append(True) - else: - raise TypeError("bad boolean value") - self._value = 0 - dispatch["boolean"] = end_boolean - - def end_int(self, data): - self.append(int(data)) - self._value = 0 - dispatch["i4"] = end_int - dispatch["i8"] = end_int - dispatch["int"] = end_int - - def end_double(self, data): - self.append(float(data)) - self._value = 0 - dispatch["double"] = end_double - - def end_string(self, data): - if self._encoding: - data = data.decode(self._encoding) - self.append(data) - self._value = 0 - dispatch["string"] = end_string - dispatch["name"] = end_string # struct keys are always strings - - def end_array(self, data): - mark = self._marks.pop() - # map arrays to Python lists - self._stack[mark:] = [self._stack[mark:]] - self._value = 0 - dispatch["array"] = end_array - - def end_struct(self, data): - mark = self._marks.pop() - # map structs to Python dictionaries - dict = {} - items = self._stack[mark:] - for i in range(0, len(items), 2): - dict[items[i]] = items[i+1] - self._stack[mark:] = [dict] - self._value = 0 - dispatch["struct"] = end_struct - - def end_base64(self, data): - value = Binary() - value.decode(data.encode("ascii")) - if self._use_bytes: - value = value.data - self.append(value) - self._value = 0 - dispatch["base64"] = end_base64 - - def end_dateTime(self, data): - value = DateTime() - value.decode(data) - if self._use_datetime: - value = _datetime_type(data) - self.append(value) - dispatch["dateTime.iso8601"] = end_dateTime - - def end_value(self, data): - # if we stumble upon a value element with no internal - # elements, treat it as a string element - if self._value: - self.end_string(data) - dispatch["value"] = 
end_value - - def end_params(self, data): - self._type = "params" - dispatch["params"] = end_params - - def end_fault(self, data): - self._type = "fault" - dispatch["fault"] = end_fault - - def end_methodName(self, data): - if self._encoding: - data = data.decode(self._encoding) - self._methodname = data - self._type = "methodName" # no params - dispatch["methodName"] = end_methodName - -## Multicall support -# - -class _MultiCallMethod(object): - # some lesser magic to store calls made to a MultiCall object - # for batch execution - def __init__(self, call_list, name): - self.__call_list = call_list - self.__name = name - def __getattr__(self, name): - return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name)) - def __call__(self, *args): - self.__call_list.append((self.__name, args)) - -class MultiCallIterator(object): - """Iterates over the results of a multicall. Exceptions are - raised in response to xmlrpc faults.""" - - def __init__(self, results): - self.results = results - - def __getitem__(self, i): - item = self.results[i] - if isinstance(type(item), dict): - raise Fault(item['faultCode'], item['faultString']) - elif type(item) == type([]): - return item[0] - else: - raise ValueError("unexpected type in multicall result") - -class MultiCall(object): - """server -> a object used to boxcar method calls - - server should be a ServerProxy object. - - Methods can be added to the MultiCall using normal - method call syntax e.g.: - - multicall = MultiCall(server_proxy) - multicall.add(2,3) - multicall.get_address("Guido") - - To execute the multicall, call the MultiCall object e.g.: - - add_result, address = multicall() - """ - - def __init__(self, server): - self.__server = server - self.__call_list = [] - - def __repr__(self): - return "<MultiCall at %x>" % id(self) - - __str__ = __repr__ - - def __getattr__(self, name): - return _MultiCallMethod(self.__call_list, name) - - def __call__(self): - marshalled_list = [] - for name, args in self.__call_list: - marshalled_list.append({'methodName' : name, 'params' : args}) - - return MultiCallIterator(self.__server.system.multicall(marshalled_list)) - -# -------------------------------------------------------------------- -# convenience functions - -FastMarshaller = FastParser = FastUnmarshaller = None - -## -# Create a parser object, and connect it to an unmarshalling instance. -# This function picks the fastest available XML parser. -# -# return A (parser, unmarshaller) tuple. - -def getparser(use_datetime=False, use_builtin_types=False): - """getparser() -> parser, unmarshaller - - Create an instance of the fastest available parser, and attach it - to an unmarshalling object. Return both objects. - """ - if FastParser and FastUnmarshaller: - if use_builtin_types: - mkdatetime = _datetime_type - mkbytes = base64.decodebytes - elif use_datetime: - mkdatetime = _datetime_type - mkbytes = _binary - else: - mkdatetime = _datetime - mkbytes = _binary - target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault) - parser = FastParser(target) - else: - target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=use_builtin_types) - if FastParser: - parser = FastParser(target) - else: - parser = ExpatParser(target) - return parser, target - -## -# Convert a Python tuple or a Fault instance to an XML-RPC packet. -# -# @def dumps(params, **options) -# @param params A tuple or Fault instance. -# @keyparam methodname If given, create a methodCall request for -# this method name.
-## -# Convert a Python tuple or a Fault instance to an XML-RPC packet. -# -# @def dumps(params, **options) -# @param params A tuple or Fault instance. -# @keyparam methodname If given, create a methodCall request for -# this method name. -# @keyparam methodresponse If given, create a methodResponse packet. -# If used with a tuple, the tuple must be a singleton (that is, -# it must contain exactly one element). -# @keyparam encoding The packet encoding. -# @return A string containing marshalled data. - -def dumps(params, methodname=None, methodresponse=None, encoding=None, - allow_none=False): - """data [,options] -> marshalled data - - Convert an argument tuple or a Fault instance to an XML-RPC - request (or response, if the methodresponse option is used). - - In addition to the data object, the following options can be given - as keyword arguments: - - methodname: the method name for a methodCall packet - - methodresponse: true to create a methodResponse packet. - If this option is used with a tuple, the tuple must be - a singleton (i.e. it can contain only one element). - - encoding: the packet encoding (default is UTF-8) - - All byte strings in the data structure are assumed to use the - packet encoding. Unicode strings are automatically converted, - where necessary. - """ - - assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance" - if isinstance(params, Fault): - methodresponse = 1 - elif methodresponse and isinstance(params, tuple): - assert len(params) == 1, "response tuple must be a singleton" - - if not encoding: - encoding = "utf-8" - - if FastMarshaller: - m = FastMarshaller(encoding) - else: - m = Marshaller(encoding, allow_none) - - data = m.dumps(params) - - if encoding != "utf-8": - xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding) - else: - xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default - - # standard XML-RPC wrappings - if methodname: - # a method call - if not isinstance(methodname, str): - methodname = methodname.encode(encoding) - data = ( - xmlheader, - "<methodCall>\n" - "<methodName>", methodname, "</methodName>\n", - data, - "</methodCall>\n" - ) - elif methodresponse: - # a method response, or a fault structure - data = ( - xmlheader, - "<methodResponse>\n", - data, - "</methodResponse>\n" - ) - else: - return data # return as is - return str("").join(data) - -## -# Convert an XML-RPC packet to a Python object. If the XML-RPC packet -# represents a fault condition, this function raises a Fault exception. -# -# @param data An XML-RPC packet, given as an 8-bit string. -# @return A tuple containing the unpacked data, and the method name -# (None if not present). -# @see Fault - -def loads(data, use_datetime=False, use_builtin_types=False): - """data -> unmarshalled data, method name - - Convert an XML-RPC packet to unmarshalled data plus a method - name (None if not present). - - If the XML-RPC packet represents a fault condition, this function - raises a Fault exception.
- """ - p, u = getparser(use_datetime=use_datetime, use_builtin_types=use_builtin_types) - p.feed(data) - p.close() - return u.close(), u.getmethodname() - -## -# Encode a string using the gzip content encoding such as specified by the -# Content-Encoding: gzip -# in the HTTP header, as described in RFC 1952 -# -# @param data the unencoded data -# @return the encoded data - -def gzip_encode(data): - """data -> gzip encoded data - - Encode data using the gzip content encoding as described in RFC 1952 - """ - if not gzip: - raise NotImplementedError - f = BytesIO() - gzf = gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1) - gzf.write(data) - gzf.close() - encoded = f.getvalue() - f.close() - return encoded - -## -# Decode a string using the gzip content encoding such as specified by the -# Content-Encoding: gzip -# in the HTTP header, as described in RFC 1952 -# -# @param data The encoded data -# @return the unencoded data -# @raises ValueError if data is not correctly coded. - -def gzip_decode(data): - """gzip encoded data -> unencoded data - - Decode data using the gzip content encoding as described in RFC 1952 - """ - if not gzip: - raise NotImplementedError - f = BytesIO(data) - gzf = gzip.GzipFile(mode="rb", fileobj=f) - try: - decoded = gzf.read() - except IOError: - raise ValueError("invalid data") - f.close() - gzf.close() - return decoded - -## -# Return a decoded file-like object for the gzip encoding -# as described in RFC 1952. -# -# @param response A stream supporting a read() method -# @return a file-like object that the decoded data can be read() from - -class GzipDecodedResponse(gzip.GzipFile if gzip else object): - """a file-like object to decode a response encoded with the gzip - method, as described in RFC 1952. - """ - def __init__(self, response): - #response doesn't support tell() and read(), required by - #GzipFile - if not gzip: - raise NotImplementedError - self.io = BytesIO(response.read()) - gzip.GzipFile.__init__(self, mode="rb", fileobj=self.io) - - def close(self): - gzip.GzipFile.close(self) - self.io.close() - - -# -------------------------------------------------------------------- -# request dispatcher - -class _Method(object): - # some magic to bind an XML-RPC method to an RPC server. - # supports "nested" methods (e.g. examples.getStateName) - def __init__(self, send, name): - self.__send = send - self.__name = name - def __getattr__(self, name): - return _Method(self.__send, "%s.%s" % (self.__name, name)) - def __call__(self, *args): - return self.__send(self.__name, args) - -## -# Standard transport class for XML-RPC over HTTP. -#

-## -# Standard transport class for XML-RPC over HTTP. -# <p> -# You can create custom transports by subclassing this class, and -# overriding selected methods. - -class Transport(object): - """Handles an HTTP transaction to an XML-RPC server.""" - - # client identifier (may be overridden) - user_agent = "Python-xmlrpc/%s" % __version__ - - #if true, we'll request gzip encoding - accept_gzip_encoding = True - - # if positive, encode request using gzip if it exceeds this threshold - # note that many servers will get confused, so only use it if you know - # that they can decode such a request - encode_threshold = None #None = don't encode - - def __init__(self, use_datetime=False, use_builtin_types=False): - self._use_datetime = use_datetime - self._use_builtin_types = use_builtin_types - self._connection = (None, None) - self._extra_headers = [] - - ## - # Send a complete request, and parse the response. - # Retry request if a cached connection has disconnected. - # - # @param host Target host. - # @param handler Target RPC handler. - # @param request_body XML-RPC request body. - # @param verbose Debugging flag. - # @return Parsed response. - - def request(self, host, handler, request_body, verbose=False): - #retry request once if cached connection has gone cold - for i in (0, 1): - try: - return self.single_request(host, handler, request_body, verbose) - except socket.error as e: - if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE): - raise - except http_client.BadStatusLine: #close after we sent request - if i: - raise - - def single_request(self, host, handler, request_body, verbose=False): - # issue XML-RPC request - try: - http_conn = self.send_request(host, handler, request_body, verbose) - resp = http_conn.getresponse() - if resp.status == 200: - self.verbose = verbose - return self.parse_response(resp) - - except Fault: - raise - except Exception: - #All unexpected errors leave connection in - # a strange state, so we clear it. - self.close() - raise - - #We got an error response. - #Discard any response data and raise exception - if resp.getheader("content-length", ""): - resp.read() - raise ProtocolError( - host + handler, - resp.status, resp.reason, - dict(resp.getheaders()) - ) - - - ## - # Create parser. - # - # @return A 2-tuple containing a parser and an unmarshaller. - - def getparser(self): - # get parser and unmarshaller - return getparser(use_datetime=self._use_datetime, - use_builtin_types=self._use_builtin_types) - - ## - # Get authorization info from host parameter - # Host may be a string, or a (host, x509-dict) tuple; if a string, - # it is checked for a "user:pw@host" format, and a "Basic - # Authentication" header is added if appropriate. - # - # @param host Host descriptor (URL or (URL, x509 info) tuple). - # @return A 3-tuple containing (actual host, extra headers, - # x509 info). The header and x509 fields may be None. - - def get_host_info(self, host): - - x509 = {} - if isinstance(host, tuple): - host, x509 = host - - auth, host = urllib_parse.splituser(host) - - if auth: - auth = urllib_parse.unquote_to_bytes(auth) - auth = base64.encodebytes(auth).decode("utf-8") - auth = "".join(auth.split()) # get rid of whitespace - extra_headers = [ - ("Authorization", "Basic " + auth) - ] - else: - extra_headers = [] - - return host, extra_headers, x509
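Per the section comment above, custom transports are built by subclassing Transport; a minimal sketch, where the class name and User-Agent string are invented:

    class MyTransport(Transport):
        # user_agent is the documented "client identifier (may be overridden)" hook
        user_agent = "my-app/1.0"

    # Hypothetical use with the ServerProxy class defined later in this file:
    # proxy = ServerProxy("http://localhost:8000", transport=MyTransport())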
- ## - # Connect to server. - # - # @param host Target host. - # @return An HTTPConnection object - - def make_connection(self, host): - #return an existing connection if possible. This allows - #HTTP/1.1 keep-alive. - if self._connection and host == self._connection[0]: - return self._connection[1] - # create an HTTP connection object from a host descriptor - chost, self._extra_headers, x509 = self.get_host_info(host) - self._connection = host, http_client.HTTPConnection(chost) - return self._connection[1] - - ## - # Clear any cached connection object. - # Used in the event of socket errors. - # - def close(self): - if self._connection[1]: - self._connection[1].close() - self._connection = (None, None) - - ## - # Send HTTP request. - # - # @param host Host descriptor (URL or (URL, x509 info) tuple). - # @param handler Target RPC handler (a path relative to host) - # @param request_body The XML-RPC request body - # @param debug Enable debugging if debug is true. - # @return An HTTPConnection. - - def send_request(self, host, handler, request_body, debug): - connection = self.make_connection(host) - headers = self._extra_headers[:] - if debug: - connection.set_debuglevel(1) - if self.accept_gzip_encoding and gzip: - connection.putrequest("POST", handler, skip_accept_encoding=True) - headers.append(("Accept-Encoding", "gzip")) - else: - connection.putrequest("POST", handler) - headers.append(("Content-Type", "text/xml")) - headers.append(("User-Agent", self.user_agent)) - self.send_headers(connection, headers) - self.send_content(connection, request_body) - return connection - - ## - # Send request headers. - # This function provides a useful hook for subclassing - # - # @param connection HTTPConnection. - # @param headers list of key,value pairs for HTTP headers - - def send_headers(self, connection, headers): - for key, val in headers: - connection.putheader(key, val) - - ## - # Send request body. - # This function provides a useful hook for subclassing - # - # @param connection HTTPConnection. - # @param request_body XML-RPC request body. - - def send_content(self, connection, request_body): - #optionally encode the request - if (self.encode_threshold is not None and - self.encode_threshold < len(request_body) and - gzip): - connection.putheader("Content-Encoding", "gzip") - request_body = gzip_encode(request_body) - - connection.putheader("Content-Length", str(len(request_body))) - connection.endheaders(request_body) - - ## - # Parse response. - # - # @param file Stream. - # @return Response tuple and target method. - - def parse_response(self, response): - # read response data from httpresponse, and parse it - # Check for new http response object, otherwise it is a file object. - if hasattr(response, 'getheader'): - if response.getheader("Content-Encoding", "") == "gzip": - stream = GzipDecodedResponse(response) - else: - stream = response - else: - stream = response - - p, u = self.getparser() - - while 1: - data = stream.read(1024) - if not data: - break - if self.verbose: - print("body:", repr(data)) - p.feed(data) - - if stream is not response: - stream.close() - p.close() - - return u.close()
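How the encode_threshold knob above is meant to be used -- a sketch, with the caveat from the comment that the server must be known to accept gzipped requests; the 1400-byte value is an assumption (roughly one Ethernet MTU):

    t = Transport()
    t.encode_threshold = 1400   # send_content() gzips bodies larger than this
    # proxy = ServerProxy("http://localhost:8000", transport=t)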
-## -# Standard transport class for XML-RPC over HTTPS. - -class SafeTransport(Transport): - """Handles an HTTPS transaction to an XML-RPC server.""" - - # FIXME: mostly untested - - def make_connection(self, host): - if self._connection and host == self._connection[0]: - return self._connection[1] - - if not hasattr(http_client, "HTTPSConnection"): - raise NotImplementedError( - "your version of http.client doesn't support HTTPS") - # create an HTTPS connection object from a host descriptor - # host may be a string, or a (host, x509-dict) tuple - chost, self._extra_headers, x509 = self.get_host_info(host) - self._connection = host, http_client.HTTPSConnection(chost, - None, **(x509 or {})) - return self._connection[1]

-## -# Standard server proxy. This class establishes a virtual connection -# to an XML-RPC server. -# <p> -# This class is available as ServerProxy and Server. New code should -# use ServerProxy, to avoid confusion. -# -# @def ServerProxy(uri, **options) -# @param uri The connection point on the server. -# @keyparam transport A transport factory, compatible with the -# standard transport class. -# @keyparam encoding The default encoding used for 8-bit strings -# (default is UTF-8). -# @keyparam verbose Use a true value to enable debugging output. -# (printed to standard output). -# @see Transport - -class ServerProxy(object): - """uri [,options] -> a logical connection to an XML-RPC server - - uri is the connection point on the server, given as - scheme://host/target. - - The standard implementation always supports the "http" scheme. If - SSL socket support is available (Python 2.0), it also supports - "https". - - If the target part and the slash preceding it are both omitted, - "/RPC2" is assumed. - - The following options can be given as keyword arguments: - - transport: a transport factory - encoding: the request encoding (default is UTF-8) - - All 8-bit strings passed to the server proxy are assumed to use - the given encoding. - """ - - def __init__(self, uri, transport=None, encoding=None, verbose=False, - allow_none=False, use_datetime=False, use_builtin_types=False): - # establish a "logical" server connection - - # get the url - type, uri = urllib_parse.splittype(uri) - if type not in ("http", "https"): - raise IOError("unsupported XML-RPC protocol") - self.__host, self.__handler = urllib_parse.splithost(uri) - if not self.__handler: - self.__handler = "/RPC2" - - if transport is None: - if type == "https": - handler = SafeTransport - else: - handler = Transport - transport = handler(use_datetime=use_datetime, - use_builtin_types=use_builtin_types) - self.__transport = transport - - self.__encoding = encoding or 'utf-8' - self.__verbose = verbose - self.__allow_none = allow_none - - def __close(self): - self.__transport.close() - - def __request(self, methodname, params): - # call a method on the remote server - - request = dumps(params, methodname, encoding=self.__encoding, - allow_none=self.__allow_none).encode(self.__encoding) - - response = self.__transport.request( - self.__host, - self.__handler, - request, - verbose=self.__verbose - ) - - if len(response) == 1: - response = response[0] - - return response - - def __repr__(self): - return ( - "<ServerProxy for %s%s>" % - (self.__host, self.__handler) - ) - - __str__ = __repr__ - - def __getattr__(self, name): - # magic method dispatcher - return _Method(self.__request, name) - - # note: to call a remote object with a non-standard name, use - # result getattr(server, "strange-python-name")(args) - - def __call__(self, attr): - """A workaround to get special attributes on the ServerProxy - without interfering with the magic __getattr__ - """ - if attr == "close": - return self.__close - elif attr == "transport": - return self.__transport - raise AttributeError("Attribute %r not found" % (attr,)) - -# compatibility - -Server = ServerProxy
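A sketch of the ServerProxy surface documented above; the endpoint is hypothetical, and no request is issued until a method is actually called:

    proxy = ServerProxy("http://localhost:8000/RPC2", allow_none=True)
    get_state = getattr(proxy, "examples.getStateName")  # nested _Method chain
    # get_state(41)                  # would perform the RPC
    transport = proxy("transport")   # the __call__ workaround for special attributes
    proxy("close")()                 # closes the cached transport connection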
-# -------------------------------------------------------------------- -# test code - -if __name__ == "__main__": - - # simple test program (from the XML-RPC specification) - - # local server, available from Lib/xmlrpc/server.py - server = ServerProxy("http://localhost:8000") - - try: - print(server.currentTime.getCurrentTime()) - except Error as v: - print("ERROR", v) - - multi = MultiCall(server) - multi.getData() - multi.pow(2,9) - multi.add(1,2) - try: - for response in multi(): - print(response) - except Error as v: - print("ERROR", v) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/server.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/server.py deleted file mode 100755 index 28072bf..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/xmlrpc/server.py +++ /dev/null @@ -1,999 +0,0 @@ -r""" -Ported using Python-Future from the Python 3.3 standard library. - -XML-RPC Servers. - -This module can be used to create simple XML-RPC servers -by creating a server and installing functions or a class -instance, or by extending the SimpleXMLRPCServer class. - -It can also be used to handle XML-RPC requests in a CGI -environment using CGIXMLRPCRequestHandler. - -The Doc* classes can be used to create XML-RPC servers that -serve pydoc-style documentation in response to HTTP -GET requests. This documentation is dynamically generated -based on the functions and methods registered with the -server. - -A list of possible usage patterns follows: - -1. Install functions: - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_function(pow) -server.register_function(lambda x,y: x+y, 'add') -server.serve_forever() - -2. Install an instance: - -class MyFuncs: - def __init__(self): - # make all of the sys functions available through sys.func_name - import sys - self.sys = sys - def _listMethods(self): - # implement this method so that system.listMethods - # knows to advertise the sys methods - return list_public_methods(self) + \ - ['sys.' + method for method in list_public_methods(self.sys)] - def pow(self, x, y): return pow(x, y) - def add(self, x, y): return x + y - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(MyFuncs()) -server.serve_forever() - -3. Install an instance with custom dispatch method: - -class Math: - def _listMethods(self): - # this method must be present for system.listMethods - # to work - return ['add', 'pow'] - def _methodHelp(self, method): - # this method must be present for system.methodHelp - # to work - if method == 'add': - return "add(2,3) => 5" - elif method == 'pow': - return "pow(x, y[, z]) => number" - else: - # By convention, return empty - # string if no help is available - return "" - def _dispatch(self, method, params): - if method == 'pow': - return pow(*params) - elif method == 'add': - return params[0] + params[1] - else: - raise ValueError('bad method') - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(Math()) -server.serve_forever() - -4. Subclass SimpleXMLRPCServer: - -class MathServer(SimpleXMLRPCServer): - def _dispatch(self, method, params): - try: - # We are forcing the 'export_' prefix on methods that are - # callable through XML-RPC to prevent potential security - # problems - func = getattr(self, 'export_' + method) - except AttributeError: - raise Exception('method "%s" is not supported' % method) - else: - return func(*params) - - def export_add(self, x, y): - return x + y - -server = MathServer(("localhost", 8000)) -server.serve_forever() - -5. CGI script: - -server = CGIXMLRPCRequestHandler() -server.register_function(pow) -server.handle_request() -""" - -from __future__ import absolute_import, division, print_function, unicode_literals -from future.builtins import int, str
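As a client-side counterpart to usage pattern 1 in the module docstring above -- a hedged sketch that assumes that server is running on localhost:8000:

    from xmlrpc.client import ServerProxy

    s = ServerProxy("http://localhost:8000")
    print(s.pow(2, 3))   # 8, the registered pow()
    print(s.add(2, 3))   # 5, the lambda registered under the name 'add'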
-# Written by Brian Quinlan (brian@sweetapp.com). -# Based on code written by Fredrik Lundh. - -from future.backports.xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode -from future.backports.http.server import BaseHTTPRequestHandler -import future.backports.http.server as http_server -from future.backports import socketserver -import sys -import os -import re -import pydoc -import inspect -import traceback -try: - import fcntl -except ImportError: - fcntl = None - -def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): - """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d - - Resolves a dotted attribute name to an object. Raises - an AttributeError if any attribute in the chain starts with a '_'. - - If the optional allow_dotted_names argument is false, dots are not - supported and this function operates similarly to getattr(obj, attr). - """ - - if allow_dotted_names: - attrs = attr.split('.') - else: - attrs = [attr] - - for i in attrs: - if i.startswith('_'): - raise AttributeError( - 'attempt to access private attribute "%s"' % i - ) - else: - obj = getattr(obj, i) - return obj - -def list_public_methods(obj): - """Returns a list of attribute strings, found in the specified - object, which represent callable attributes""" - - return [member for member in dir(obj) - if not member.startswith('_') and - callable(getattr(obj, member))] - -class SimpleXMLRPCDispatcher(object): - """Mix-in class that dispatches XML-RPC requests. - - This class is used to register XML-RPC method handlers - and then to dispatch them. This class doesn't need to be - instantiated directly when used by SimpleXMLRPCServer, but it - can be instantiated when used by the MultiPathXMLRPCServer. - """ - - def __init__(self, allow_none=False, encoding=None, - use_builtin_types=False): - self.funcs = {} - self.instance = None - self.allow_none = allow_none - self.encoding = encoding or 'utf-8' - self.use_builtin_types = use_builtin_types - - def register_instance(self, instance, allow_dotted_names=False): - """Registers an instance to respond to XML-RPC requests. - - Only one instance can be installed at a time. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. Methods beginning with an '_' - are considered private and will not be called by - SimpleXMLRPCServer. - - If a registered function matches an XML-RPC request, then it - will be called instead of the registered instance. - - If the optional allow_dotted_names argument is true and the - instance does not have a _dispatch method, method names - containing dots are supported and resolved, as long as none of - the name segments start with an '_'. - - *** SECURITY WARNING: *** - - Enabling the allow_dotted_names option allows intruders - to access your module's global variables and may allow - intruders to execute arbitrary code on your machine. Only - use this option on a secure, closed network. - - """ - - self.instance = instance - self.allow_dotted_names = allow_dotted_names
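What resolve_dotted_attribute() above computes, as a runnable sketch; the Node class and attribute names are invented:

    class Node:
        pass

    root = Node()
    root.child = Node()
    root.child.value = 42

    assert resolve_dotted_attribute(root, "child.value") == 42
    # Any segment starting with '_' raises AttributeError:
    # resolve_dotted_attribute(root, "_secret")  ->  AttributeError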
- """ - - if name is None: - name = function.__name__ - self.funcs[name] = function - - def register_introspection_functions(self): - """Registers the XML-RPC introspection methods in the system - namespace. - - see http://xmlrpc.usefulinc.com/doc/reserved.html - """ - - self.funcs.update({'system.listMethods' : self.system_listMethods, - 'system.methodSignature' : self.system_methodSignature, - 'system.methodHelp' : self.system_methodHelp}) - - def register_multicall_functions(self): - """Registers the XML-RPC multicall method in the system - namespace. - - see http://www.xmlrpc.com/discuss/msgReader$1208""" - - self.funcs.update({'system.multicall' : self.system_multicall}) - - def _marshaled_dispatch(self, data, dispatch_method = None, path = None): - """Dispatches an XML-RPC method from marshalled (XML) data. - - XML-RPC methods are dispatched from the marshalled (XML) data - using the _dispatch method and the result is returned as - marshalled data. For backwards compatibility, a dispatch - function can be provided as an argument (see comment in - SimpleXMLRPCRequestHandler.do_POST) but overriding the - existing method through subclassing is the preferred means - of changing method dispatch behavior. - """ - - try: - params, method = loads(data, use_builtin_types=self.use_builtin_types) - - # generate response - if dispatch_method is not None: - response = dispatch_method(method, params) - else: - response = self._dispatch(method, params) - # wrap response in a singleton tuple - response = (response,) - response = dumps(response, methodresponse=1, - allow_none=self.allow_none, encoding=self.encoding) - except Fault as fault: - response = dumps(fault, allow_none=self.allow_none, - encoding=self.encoding) - except: - # report exception back to server - exc_type, exc_value, exc_tb = sys.exc_info() - response = dumps( - Fault(1, "%s:%s" % (exc_type, exc_value)), - encoding=self.encoding, allow_none=self.allow_none, - ) - - return response.encode(self.encoding) - - def system_listMethods(self): - """system.listMethods() => ['add', 'subtract', 'multiple'] - - Returns a list of the methods supported by the server.""" - - methods = set(self.funcs.keys()) - if self.instance is not None: - # Instance can implement _listMethod to return a list of - # methods - if hasattr(self.instance, '_listMethods'): - methods |= set(self.instance._listMethods()) - # if the instance has a _dispatch method then we - # don't have enough information to provide a list - # of methods - elif not hasattr(self.instance, '_dispatch'): - methods |= set(list_public_methods(self.instance)) - return sorted(methods) - - def system_methodSignature(self, method_name): - """system.methodSignature('add') => [double, int, int] - - Returns a list describing the signature of the method. In the - above example, the add method takes two integers as arguments - and returns a double result. 
- def system_methodSignature(self, method_name): - """system.methodSignature('add') => [double, int, int] - - Returns a list describing the signature of the method. In the - above example, the add method takes two integers as arguments - and returns a double result. - - This server does NOT support system.methodSignature.""" - - # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html - - return 'signatures not supported' - - def system_methodHelp(self, method_name): - """system.methodHelp('add') => "Adds two integers together" - - Returns a string containing documentation for the specified method.""" - - method = None - if method_name in self.funcs: - method = self.funcs[method_name] - elif self.instance is not None: - # Instance can implement _methodHelp to return help for a method - if hasattr(self.instance, '_methodHelp'): - return self.instance._methodHelp(method_name) - # if the instance has a _dispatch method then we - # don't have enough information to provide help - elif not hasattr(self.instance, '_dispatch'): - try: - method = resolve_dotted_attribute( - self.instance, - method_name, - self.allow_dotted_names - ) - except AttributeError: - pass - - # Note that we aren't checking that the method is actually - # a callable object of some kind - if method is None: - return "" - else: - return pydoc.getdoc(method) - - def system_multicall(self, call_list): - """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \ -[[4], ...] - - Allows the caller to package multiple XML-RPC calls into a single - request. - - See http://www.xmlrpc.com/discuss/msgReader$1208 - """ - - results = [] - for call in call_list: - method_name = call['methodName'] - params = call['params'] - - try: - # XXX A marshalling error in any response will fail the entire - # multicall. If someone cares they should fix this. - results.append([self._dispatch(method_name, params)]) - except Fault as fault: - results.append( - {'faultCode' : fault.faultCode, - 'faultString' : fault.faultString} - ) - except: - exc_type, exc_value, exc_tb = sys.exc_info() - results.append( - {'faultCode' : 1, - 'faultString' : "%s:%s" % (exc_type, exc_value)} - ) - return results - - def _dispatch(self, method, params): - """Dispatches the XML-RPC method. - - XML-RPC calls are forwarded to a registered function that - matches the called XML-RPC method name. If no such function - exists then the call is forwarded to the registered instance, - if available. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. - - Methods beginning with an '_' are considered private and will - not be called. - """ - - func = None - try: - # check to see if a matching function has been registered - func = self.funcs[method] - except KeyError: - if self.instance is not None: - # check for a _dispatch method - if hasattr(self.instance, '_dispatch'): - return self.instance._dispatch(method, params) - else: - # call instance method directly - try: - func = resolve_dotted_attribute( - self.instance, - method, - self.allow_dotted_names - ) - except AttributeError: - pass - - if func is not None: - return func(*params) - else: - raise Exception('method "%s" is not supported' % method)
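The precedence described in register_instance() and _dispatch() above -- registered functions win over the installed instance -- can be seen directly with the stdlib dispatcher (names invented):

    from xmlrpc.server import SimpleXMLRPCDispatcher

    class Service:
        def echo(self, s):
            return s

    d = SimpleXMLRPCDispatcher()
    d.register_instance(Service())
    d.register_function(lambda s: s.upper(), "echo")   # shadows Service.echo
    print(d._dispatch("echo", ("hi",)))                # 'HI', not 'hi'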
-class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler): - """Simple XML-RPC request handler class. - - Handles all HTTP POST requests and attempts to decode them as - XML-RPC requests. - """ - - # Class attribute listing the accessible path components; - # paths not on this list will result in a 404 error. - rpc_paths = ('/', '/RPC2') - - #if not None, encode responses larger than this, if possible - encode_threshold = 1400 #a common MTU - - #Override from StreamRequestHandler: full buffering of output - #and no Nagle. - wbufsize = -1 - disable_nagle_algorithm = True - - # a re to match a gzip Accept-Encoding - aepattern = re.compile(r""" - \s* ([^\s;]+) \s* #content-coding - (;\s* q \s*=\s* ([0-9\.]+))? #q - """, re.VERBOSE | re.IGNORECASE) - - def accept_encodings(self): - r = {} - ae = self.headers.get("Accept-Encoding", "") - for e in ae.split(","): - match = self.aepattern.match(e) - if match: - v = match.group(3) - v = float(v) if v else 1.0 - r[match.group(1)] = v - return r - - def is_rpc_path_valid(self): - if self.rpc_paths: - return self.path in self.rpc_paths - else: - # If .rpc_paths is empty, just assume all paths are legal - return True - - def do_POST(self): - """Handles the HTTP POST request. - - Attempts to interpret all HTTP POST requests as XML-RPC calls, - which are forwarded to the server's _dispatch method for handling. - """ - - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - try: - # Get arguments by reading body of request. - # We read this in chunks to avoid straining - # socket.read(); around the 10 or 15Mb mark, some platforms - # begin to have problems (bug #792570). - max_chunk_size = 10*1024*1024 - size_remaining = int(self.headers["content-length"]) - L = [] - while size_remaining: - chunk_size = min(size_remaining, max_chunk_size) - chunk = self.rfile.read(chunk_size) - if not chunk: - break - L.append(chunk) - size_remaining -= len(L[-1]) - data = b''.join(L) - - data = self.decode_request_content(data) - if data is None: - return #response has been sent - - # In previous versions of SimpleXMLRPCServer, _dispatch - # could be overridden in this class, instead of in - # SimpleXMLRPCDispatcher. To maintain backwards compatibility, - # check to see if a subclass implements _dispatch and dispatch - # using that method if present.
- response = self.server._marshaled_dispatch( - data, getattr(self, '_dispatch', None), self.path - ) - except Exception as e: # This should only happen if the module is buggy - # internal error, report as HTTP server error - self.send_response(500) - - # Send information about the exception if requested - if hasattr(self.server, '_send_traceback_header') and \ - self.server._send_traceback_header: - self.send_header("X-exception", str(e)) - trace = traceback.format_exc() - trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') - self.send_header("X-traceback", trace) - - self.send_header("Content-length", "0") - self.end_headers() - else: - self.send_response(200) - self.send_header("Content-type", "text/xml") - if self.encode_threshold is not None: - if len(response) > self.encode_threshold: - q = self.accept_encodings().get("gzip", 0) - if q: - try: - response = gzip_encode(response) - self.send_header("Content-Encoding", "gzip") - except NotImplementedError: - pass - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - - def decode_request_content(self, data): - #support gzip encoding of request - encoding = self.headers.get("content-encoding", "identity").lower() - if encoding == "identity": - return data - if encoding == "gzip": - try: - return gzip_decode(data) - except NotImplementedError: - self.send_response(501, "encoding %r not supported" % encoding) - except ValueError: - self.send_response(400, "error decoding gzip content") - else: - self.send_response(501, "encoding %r not supported" % encoding) - self.send_header("Content-length", "0") - self.end_headers() - - def report_404(self): - # Report a 404 error - self.send_response(404) - response = b'No such page' - self.send_header("Content-type", "text/plain") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - - def log_request(self, code='-', size='-'): - """Selectively log an accepted request.""" - - if self.server.logRequests: - BaseHTTPRequestHandler.log_request(self, code, size) - -class SimpleXMLRPCServer(socketserver.TCPServer, - SimpleXMLRPCDispatcher): - """Simple XML-RPC server. - - Simple XML-RPC server that allows functions and a single instance - to be installed to handle requests. The default implementation - attempts to dispatch XML-RPC calls to the functions or instance - installed in the server. Override the _dispatch method inherited - from SimpleXMLRPCDispatcher to change this behavior. - """ - - allow_reuse_address = True - - # Warning: this is for debugging purposes only! Never set this to True in - # production code, as it will send out sensitive information (exception - # and stack trace details) when exceptions are raised inside - # SimpleXMLRPCRequestHandler.do_POST - _send_traceback_header = False - - def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - self.logRequests = logRequests - - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) - socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate) - - # [Bug #1222790] If possible, set close-on-exec flag; if a - # method spawns a subprocess, the subprocess shouldn't have - # the listening socket open.
- if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'): - flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD) - flags |= fcntl.FD_CLOEXEC - fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags) - -class MultiPathXMLRPCServer(SimpleXMLRPCServer): - """Multipath XML-RPC Server - This specialization of SimpleXMLRPCServer allows the user to create - multiple Dispatcher instances and assign them to different - HTTP request paths. This makes it possible to run two or more - 'virtual XML-RPC servers' at the same port. - Make sure that the requestHandler accepts the paths in question. - """ - def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - - SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none, - encoding, bind_and_activate, use_builtin_types) - self.dispatchers = {} - self.allow_none = allow_none - self.encoding = encoding or 'utf-8' - - def add_dispatcher(self, path, dispatcher): - self.dispatchers[path] = dispatcher - return dispatcher - - def get_dispatcher(self, path): - return self.dispatchers[path] - - def _marshaled_dispatch(self, data, dispatch_method=None, path=None): - try: - response = self.dispatchers[path]._marshaled_dispatch( - data, dispatch_method, path) - except: - # report low level exception back to server - # (each dispatcher should have handled its own - # exceptions) - exc_type, exc_value = sys.exc_info()[:2] - response = dumps( - Fault(1, "%s:%s" % (exc_type, exc_value)), - encoding=self.encoding, allow_none=self.allow_none) - response = response.encode(self.encoding) - return response - -class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher): - """Simple handler for XML-RPC data passed through CGI.""" - - def __init__(self, allow_none=False, encoding=None, use_builtin_types=False): - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) - - def handle_xmlrpc(self, request_text): - """Handle a single XML-RPC request""" - - response = self._marshaled_dispatch(request_text) - - print('Content-Type: text/xml') - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def handle_get(self): - """Handle a single HTTP GET request. - - Default implementation indicates an error because - XML-RPC uses the POST method. - """ - - code = 400 - message, explain = BaseHTTPRequestHandler.responses[code] - - response = http_server.DEFAULT_ERROR_MESSAGE % \ - { - 'code' : code, - 'message' : message, - 'explain' : explain - } - response = response.encode('utf-8') - print('Status: %d %s' % (code, message)) - print('Content-Type: %s' % http_server.DEFAULT_ERROR_CONTENT_TYPE) - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush()
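Usage pattern 5 from the module docstring, spelled out as a complete (hypothetical) CGI script built on the handler above; handle_request(), defined just below, reads the POST body from stdin and writes headers plus the XML response to stdout:

    #!/usr/bin/env python3
    # cgi-bin/rpc.py -- illustrative path
    from xmlrpc.server import CGIXMLRPCRequestHandler

    handler = CGIXMLRPCRequestHandler()
    handler.register_function(pow)
    handler.handle_request()   # GET requests get the 400 reply from handle_get()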
- """ - - if request_text is None and \ - os.environ.get('REQUEST_METHOD', None) == 'GET': - self.handle_get() - else: - # POST data is normally available through stdin - try: - length = int(os.environ.get('CONTENT_LENGTH', None)) - except (ValueError, TypeError): - length = -1 - if request_text is None: - request_text = sys.stdin.read(length) - - self.handle_xmlrpc(request_text) - - -# ----------------------------------------------------------------------------- -# Self documenting XML-RPC Server. - -class ServerHTMLDoc(pydoc.HTMLDoc): - """Class used to generate pydoc HTML document for a server""" - - def markup(self, text, escape=None, funcs={}, classes={}, methods={}): - """Mark up some plain text, given a context of symbols to look for. - Each context dictionary maps object names to anchor names.""" - escape = escape or self.escape - results = [] - here = 0 - - # XXX Note that this regular expression does not allow for the - # hyperlinking of arbitrary strings being used as method - # names. Only methods with names consisting of word characters - # and '.'s are hyperlinked. - pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|' - r'RFC[- ]?(\d+)|' - r'PEP[- ]?(\d+)|' - r'(self\.)?((?:\w|\.)+))\b') - while 1: - match = pattern.search(text, here) - if not match: break - start, end = match.span() - results.append(escape(text[here:start])) - - all, scheme, rfc, pep, selfdot, name = match.groups() - if scheme: - url = escape(all).replace('"', '"') - results.append('%s' % (url, url)) - elif rfc: - url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc) - results.append('%s' % (url, escape(all))) - elif pep: - url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep) - results.append('%s' % (url, escape(all))) - elif text[end:end+1] == '(': - results.append(self.namelink(name, methods, funcs, classes)) - elif selfdot: - results.append('self.%s' % name) - else: - results.append(self.namelink(name, classes)) - here = end - results.append(escape(text[here:])) - return ''.join(results) - - def docroutine(self, object, name, mod=None, - funcs={}, classes={}, methods={}, cl=None): - """Produce HTML documentation for a function or method object.""" - - anchor = (cl and cl.__name__ or '') + '-' + name - note = '' - - title = '%s' % ( - self.escape(anchor), self.escape(name)) - - if inspect.ismethod(object): - args = inspect.getfullargspec(object) - # exclude the argument bound to the instance, it will be - # confusing to the non-Python user - argspec = inspect.formatargspec ( - args.args[1:], - args.varargs, - args.varkw, - args.defaults, - annotations=args.annotations, - formatvalue=self.formatvalue - ) - elif inspect.isfunction(object): - args = inspect.getfullargspec(object) - argspec = inspect.formatargspec( - args.args, args.varargs, args.varkw, args.defaults, - annotations=args.annotations, - formatvalue=self.formatvalue) - else: - argspec = '(...)' - - if isinstance(object, tuple): - argspec = object[0] or argspec - docstring = object[1] or "" - else: - docstring = pydoc.getdoc(object) - - decl = title + argspec + (note and self.grey( - '%s' % note)) - - doc = self.markup( - docstring, self.preformat, funcs, classes, methods) - doc = doc and '

%s
' % doc - return '
%s
%s
\n' % (decl, doc) - - def docserver(self, server_name, package_documentation, methods): - """Produce HTML documentation for an XML-RPC server.""" - - fdict = {} - for key, value in methods.items(): - fdict[key] = '#-' + key - fdict[value] = fdict[key] - - server_name = self.escape(server_name) - head = '%s' % server_name - result = self.heading(head, '#ffffff', '#7799ee') - - doc = self.markup(package_documentation, self.preformat, fdict) - doc = doc and '%s' % doc - result = result + '

%s

\n' % doc - - contents = [] - method_items = sorted(methods.items()) - for key, value in method_items: - contents.append(self.docroutine(value, key, funcs=fdict)) - result = result + self.bigsection( - 'Methods', '#ffffff', '#eeaa77', ''.join(contents)) - - return result - -class XMLRPCDocGenerator(object): - """Generates documentation for an XML-RPC server. - - This class is designed as mix-in and should not - be constructed directly. - """ - - def __init__(self): - # setup variables used for HTML documentation - self.server_name = 'XML-RPC Server Documentation' - self.server_documentation = \ - "This server exports the following methods through the XML-RPC "\ - "protocol." - self.server_title = 'XML-RPC Server Documentation' - - def set_server_title(self, server_title): - """Set the HTML title of the generated server documentation""" - - self.server_title = server_title - - def set_server_name(self, server_name): - """Set the name of the generated HTML server documentation""" - - self.server_name = server_name - - def set_server_documentation(self, server_documentation): - """Set the documentation string for the entire server.""" - - self.server_documentation = server_documentation - - def generate_html_documentation(self): - """generate_html_documentation() => html documentation for the server - - Generates HTML documentation for the server using introspection for - installed functions and instances that do not implement the - _dispatch method. Alternatively, instances can choose to implement - the _get_method_argstring(method_name) method to provide the - argument string used in the documentation and the - _methodHelp(method_name) method to provide the help text used - in the documentation.""" - - methods = {} - - for method_name in self.system_listMethods(): - if method_name in self.funcs: - method = self.funcs[method_name] - elif self.instance is not None: - method_info = [None, None] # argspec, documentation - if hasattr(self.instance, '_get_method_argstring'): - method_info[0] = self.instance._get_method_argstring(method_name) - if hasattr(self.instance, '_methodHelp'): - method_info[1] = self.instance._methodHelp(method_name) - - method_info = tuple(method_info) - if method_info != (None, None): - method = method_info - elif not hasattr(self.instance, '_dispatch'): - try: - method = resolve_dotted_attribute( - self.instance, - method_name - ) - except AttributeError: - method = method_info - else: - method = method_info - else: - assert 0, "Could not find method in self.functions and no "\ - "instance installed" - - methods[method_name] = method - - documenter = ServerHTMLDoc() - documentation = documenter.docserver( - self.server_name, - self.server_documentation, - methods - ) - - return documenter.page(self.server_title, documentation) - -class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): - """XML-RPC and documentation request handler class. - - Handles all HTTP POST requests and attempts to decode them as - XML-RPC requests. - - Handles all HTTP GET requests and interprets them as requests - for documentation. - """ - - def do_GET(self): - """Handles the HTTP GET request. - - Interpret all HTTP GET requests as requests for server - documentation. 
- """ - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - response = self.server.generate_html_documentation().encode('utf-8') - self.send_response(200) - self.send_header("Content-type", "text/html") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - -class DocXMLRPCServer( SimpleXMLRPCServer, - XMLRPCDocGenerator): - """XML-RPC and HTML documentation server. - - Adds the ability to serve server documentation to the capabilities - of SimpleXMLRPCServer. - """ - - def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None, - bind_and_activate=True, use_builtin_types=False): - SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, - allow_none, encoding, bind_and_activate, - use_builtin_types) - XMLRPCDocGenerator.__init__(self) - -class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler, - XMLRPCDocGenerator): - """Handler for XML-RPC data and documentation requests passed through - CGI""" - - def handle_get(self): - """Handles the HTTP GET request. - - Interpret all HTTP GET requests as requests for server - documentation. - """ - - response = self.generate_html_documentation().encode('utf-8') - - print('Content-Type: text/html') - print('Content-Length: %d' % len(response)) - print() - sys.stdout.flush() - sys.stdout.buffer.write(response) - sys.stdout.buffer.flush() - - def __init__(self): - CGIXMLRPCRequestHandler.__init__(self) - XMLRPCDocGenerator.__init__(self) - - -if __name__ == '__main__': - import datetime - - class ExampleService: - def getData(self): - return '42' - - class currentTime: - @staticmethod - def getCurrentTime(): - return datetime.datetime.now() - - server = SimpleXMLRPCServer(("localhost", 8000)) - server.register_function(pow) - server.register_function(lambda x,y: x+y, 'add') - server.register_instance(ExampleService(), allow_dotted_names=True) - server.register_multicall_functions() - print('Serving XML-RPC on localhost port 8000') - print('It is advisable to run this example server within a secure, closed network.') - try: - server.serve_forever() - except KeyboardInterrupt: - print("\nKeyboard interrupt received, exiting.") - server.server_close() - sys.exit(0) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/__init__.py deleted file mode 100755 index 8bc1649..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -A module that brings in equivalents of the new and modified Python 3 -builtins into Py2. Has no effect on Py3. - -See the docs `here `_ -(``docs/what-else.rst``) for more information. - -""" - -from future.builtins.iterators import (filter, map, zip) -# The isinstance import is no longer needed. We provide it only for -# backward-compatibility with future v0.8.2. It will be removed in future v1.0. 
-from future.builtins.misc import (ascii, chr, hex, input, isinstance, next, - oct, open, pow, round, super, max, min) -from future.utils import PY3 - -if PY3: - import builtins - bytes = builtins.bytes - dict = builtins.dict - int = builtins.int - list = builtins.list - object = builtins.object - range = builtins.range - str = builtins.str - __all__ = [] -else: - from future.types import (newbytes as bytes, - newdict as dict, - newint as int, - newlist as list, - newobject as object, - newrange as range, - newstr as str) -from future import utils - - -if not utils.PY3: - # We only import names that shadow the builtins on Py2. No other namespace - # pollution on Py2. - - # Only shadow builtins on Py2; no new names - __all__ = ['filter', 'map', 'zip', - 'ascii', 'chr', 'hex', 'input', 'next', 'oct', 'open', 'pow', - 'round', 'super', - 'bytes', 'dict', 'int', 'list', 'object', 'range', 'str', 'max', 'min' - ] - -else: - # No namespace pollution on Py3 - __all__ = [] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/disabled.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/disabled.py deleted file mode 100755 index f6d6ea9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/disabled.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -This disables builtin functions (and one exception class) which are -removed from Python 3.3. - -This module is designed to be used like this:: - - from future.builtins.disabled import * - -This disables the following obsolete Py2 builtin functions:: - - apply, cmp, coerce, execfile, file, input, long, - raw_input, reduce, reload, unicode, xrange - -We don't hack __builtin__, which is very fragile because it contaminates -imported modules too. Instead, we just create new functions with -the same names as the obsolete builtins from Python 2 which raise -NameError exceptions when called. - -Note that both ``input()`` and ``raw_input()`` are among the disabled -functions (in this module). Although ``input()`` exists as a builtin in -Python 3, the Python 2 ``input()`` builtin is unsafe to use because it -can lead to shell injection. Therefore we shadow it by default upon ``from -future.builtins.disabled import *``, in case someone forgets to import our -replacement ``input()`` somehow and expects Python 3 semantics. - -See the ``future.builtins.misc`` module for a working version of -``input`` with Python 3 semantics. - -(Note that callable() is not among the functions disabled; this was -reintroduced into Python 3.2.) - -This exception class is also disabled: - - StandardError - -""" - -from __future__ import division, absolute_import, print_function - -from future import utils - - -OBSOLETE_BUILTINS = ['apply', 'chr', 'cmp', 'coerce', 'execfile', 'file', - 'input', 'long', 'raw_input', 'reduce', 'reload', - 'unicode', 'xrange', 'StandardError'] - - -def disabled_function(name): - ''' - Returns a function that cannot be called - ''' - def disabled(*args, **kwargs): - ''' - A function disabled by the ``future`` module. This function is - no longer a builtin in Python 3. 
- ''' - raise NameError('obsolete Python 2 builtin {0} is disabled'.format(name)) - return disabled - - -if not utils.PY3: - for fname in OBSOLETE_BUILTINS: - locals()[fname] = disabled_function(fname) - __all__ = OBSOLETE_BUILTINS -else: - __all__ = [] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/iterators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/iterators.py deleted file mode 100755 index dff651e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/iterators.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -This module is designed to be used as follows:: - - from future.builtins.iterators import * - -And then, for example:: - - for i in range(10**15): - pass - - for (a, b) in zip(range(10**15), range(-10**15, 0)): - pass - -Note that this is standard Python 3 code, plus some imports that do -nothing on Python 3. - -The iterators this brings in are:: - -- ``range`` -- ``filter`` -- ``map`` -- ``zip`` - -On Python 2, ``range`` is a pure-Python backport of Python 3's ``range`` -iterator with slicing support. The other iterators (``filter``, ``map``, -``zip``) are from the ``itertools`` module on Python 2. On Python 3 these -are available in the module namespace but not exported for * imports via -__all__ (zero namespace pollution). - -Note that these are also available in the standard library -``future_builtins`` module on Python 2 -- but not Python 3, so using -the standard library version is not portable, nor anywhere near complete. -""" - -from __future__ import division, absolute_import, print_function - -import itertools -from future import utils - -if not utils.PY3: - filter = itertools.ifilter - map = itertools.imap - from future.types import newrange as range - zip = itertools.izip - __all__ = ['filter', 'map', 'range', 'zip'] -else: - import builtins - filter = builtins.filter - map = builtins.map - range = builtins.range - zip = builtins.zip - __all__ = [] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/misc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/misc.py deleted file mode 100755 index f86ce5f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/misc.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -A module that brings in equivalents of various modified Python 3 builtins -into Py2. Has no effect on Py3. - -The builtin functions are: - -- ``ascii`` (from Py2's future_builtins module) -- ``hex`` (from Py2's future_builtins module) -- ``oct`` (from Py2's future_builtins module) -- ``chr`` (equivalent to ``unichr`` on Py2) -- ``input`` (equivalent to ``raw_input`` on Py2) -- ``next`` (calls ``__next__`` if it exists, else ``next`` method) -- ``open`` (equivalent to io.open on Py2) -- ``super`` (backport of Py3's magic zero-argument super() function) -- ``round`` (new "Banker's Rounding" behaviour from Py3) -- ``max`` (new default option from Py3.4) -- ``min`` (new default option from Py3.4) - -``isinstance`` is also currently exported for backwards compatibility -with v0.8.2, although this has been deprecated since v0.9. - - -input() -------- -Like the new ``input()`` function from Python 3 (without eval()), except -that it returns bytes. Equivalent to Python 2's ``raw_input()``. - -Warning: By default, importing this module *removes* the old Python 2 -input() function entirely from ``__builtin__`` for safety.
This is -because forgetting to import the new ``input`` from ``future`` might -otherwise lead to a security vulnerability (shell injection) on Python 2. - -To restore it, you can retrieve it yourself from -``__builtin__._old_input``. - -Fortunately, ``input()`` seems to be seldom used in the wild in Python -2... - -""" - -from future import utils - - -if utils.PY2: - from io import open - from future_builtins import ascii, oct, hex - from __builtin__ import unichr as chr, pow as _builtin_pow - import __builtin__ - - # Only for backward compatibility with future v0.8.2: - isinstance = __builtin__.isinstance - - # Warning: Python 2's input() is unsafe and MUST not be able to be used - # accidentally by someone who expects Python 3 semantics but forgets - # to import it on Python 2. Versions of ``future`` prior to 0.11 - # deleted it from __builtin__. Now we keep in __builtin__ but shadow - # the name like all others. Just be sure to import ``input``. - - input = raw_input - - from future.builtins.newnext import newnext as next - from future.builtins.newround import newround as round - from future.builtins.newsuper import newsuper as super - from future.builtins.new_min_max import newmax as max - from future.builtins.new_min_max import newmin as min - from future.types.newint import newint - - _SENTINEL = object() - - def pow(x, y, z=_SENTINEL): - """ - pow(x, y[, z]) -> number - - With two arguments, equivalent to x**y. With three arguments, - equivalent to (x**y) % z, but may be more efficient (e.g. for ints). - """ - # Handle newints - if isinstance(x, newint): - x = long(x) - if isinstance(y, newint): - y = long(y) - if isinstance(z, newint): - z = long(z) - - try: - if z == _SENTINEL: - return _builtin_pow(x, y) - else: - return _builtin_pow(x, y, z) - except ValueError: - if z == _SENTINEL: - return _builtin_pow(x+0j, y) - else: - return _builtin_pow(x+0j, y, z) - - - # ``future`` doesn't support Py3.0/3.1. If we ever did, we'd add this: - # callable = __builtin__.callable - - __all__ = ['ascii', 'chr', 'hex', 'input', 'isinstance', 'next', 'oct', - 'open', 'pow', 'round', 'super', 'max', 'min'] - -else: - import builtins - ascii = builtins.ascii - chr = builtins.chr - hex = builtins.hex - input = builtins.input - next = builtins.next - # Only for backward compatibility with future v0.8.2: - isinstance = builtins.isinstance - oct = builtins.oct - open = builtins.open - pow = builtins.pow - round = builtins.round - super = builtins.super - if utils.PY34_PLUS: - max = builtins.max - min = builtins.min - __all__ = [] - else: - from future.builtins.new_min_max import newmax as max - from future.builtins.new_min_max import newmin as min - __all__ = ['min', 'max'] - - # The callable() function was removed from Py3.0 and 3.1 and - # reintroduced into Py3.2+. ``future`` doesn't support Py3.0/3.1. 
If we ever - # did, we'd add this: - # try: - # callable = builtins.callable - # except AttributeError: - # # Definition from Pandas - # def callable(obj): - # return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - # __all__.append('callable') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/new_min_max.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/new_min_max.py deleted file mode 100755 index 6f0c2a8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/new_min_max.py +++ /dev/null @@ -1,59 +0,0 @@ -import itertools - -from future import utils -if utils.PY2: - from __builtin__ import max as _builtin_max, min as _builtin_min -else: - from builtins import max as _builtin_max, min as _builtin_min - -_SENTINEL = object() - - -def newmin(*args, **kwargs): - return new_min_max(_builtin_min, *args, **kwargs) - - -def newmax(*args, **kwargs): - return new_min_max(_builtin_max, *args, **kwargs) - - -def new_min_max(_builtin_func, *args, **kwargs): - """ - To support the argument "default" introduced in Python 3.4 for min and max - :param _builtin_func: builtin min or builtin max - :param args: - :param kwargs: - :return: returns the min or max based on the arguments passed - """ - - for key, _ in kwargs.items(): - if key not in set(['key', 'default']): - raise TypeError('Illegal argument %s' % key) - - if len(args) == 0: - raise TypeError - - if len(args) != 1 and kwargs.get('default', _SENTINEL) is not _SENTINEL: - raise TypeError - - if len(args) == 1: - iterator = iter(args[0]) - try: - first = next(iterator) - except StopIteration: - if kwargs.get('default', _SENTINEL) is not _SENTINEL: - return kwargs.get('default') - else: - raise ValueError('{}() arg is an empty sequence'.format(_builtin_func.__name__)) - else: - iterator = itertools.chain([first], iterator) - if kwargs.get('key') is not None: - return _builtin_func(iterator, key=kwargs.get('key')) - else: - return _builtin_func(iterator) - - if len(args) > 1: - if kwargs.get('key') is not None: - return _builtin_func(args, key=kwargs.get('key')) - else: - return _builtin_func(args) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newnext.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newnext.py deleted file mode 100755 index 097638a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newnext.py +++ /dev/null @@ -1,70 +0,0 @@ -''' -This module provides a newnext() function in Python 2 that mimics the -behaviour of ``next()`` in Python 3, falling back to Python 2's behaviour for -compatibility if this fails. - -``newnext(iterator)`` calls the iterator's ``__next__()`` method if it exists. If this -doesn't exist, it falls back to calling a ``next()`` method. - -For example: - - >>> class Odds(object): - ... def __init__(self, start=1): - ... self.value = start - 2 - ... def __next__(self): # note the Py3 interface - ... self.value += 2 - ... return self.value - ... def __iter__(self): - ... return self - ... - >>> iterator = Odds() - >>> next(iterator) - 1 - >>> next(iterator) - 3 - -If you are defining your own custom iterator class as above, it is preferable -to explicitly decorate the class with the @implements_iterator decorator from -``future.utils`` as follows: - - >>> @implements_iterator - ... class Odds(object): - ... # etc - ...
pass - -This next() function is primarily for consuming iterators defined in Python 3 -code elsewhere that we would like to run on Python 2 or 3. -''' - -_builtin_next = next - -_SENTINEL = object() - -def newnext(iterator, default=_SENTINEL): - """ - next(iterator[, default]) - - Return the next item from the iterator. If default is given and the iterator - is exhausted, it is returned instead of raising StopIteration. - """ - - # args = [] - # if default is not _SENTINEL: - # args.append(default) - try: - try: - return iterator.__next__() - except AttributeError: - try: - return iterator.next() - except AttributeError: - raise TypeError("'{0}' object is not an iterator".format( - iterator.__class__.__name__)) - except StopIteration as e: - if default is _SENTINEL: - raise e - else: - return default - - -__all__ = ['newnext'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newround.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newround.py deleted file mode 100755 index 394a2c6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newround.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -``python-future``: pure Python implementation of Python 3 round(). -""" - -from future.utils import PYPY, PY26, bind_method - -# Use the decimal module for simplicity of implementation (and -# hopefully correctness). -from decimal import Decimal, ROUND_HALF_EVEN - - -def newround(number, ndigits=None): - """ - See Python 3 documentation: uses Banker's Rounding. - - Delegates to the __round__ method if for some reason this exists. - - If not, rounds a number to a given precision in decimal digits (default - 0 digits). This returns an int when called with one argument, - otherwise the same type as the number. ndigits may be negative. - - See the test_round method in future/tests/test_builtins.py for - examples. - """ - return_int = False - if ndigits is None: - return_int = True - ndigits = 0 - if hasattr(number, '__round__'): - return number.__round__(ndigits) - - if ndigits < 0: - raise NotImplementedError('negative ndigits not supported yet') - exponent = Decimal('10') ** (-ndigits) - - if PYPY: - # Work around issue #24: round() breaks on PyPy with NumPy's types - if 'numpy' in repr(type(number)): - number = float(number) - - if isinstance(number, Decimal): - d = number - else: - if not PY26: - d = Decimal.from_float(number).quantize(exponent, - rounding=ROUND_HALF_EVEN) - else: - d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN) - - if return_int: - return int(d) - else: - return float(d) - - -### From Python 2.7's decimal.py. Only needed to support Py2.6: - -def from_float_26(f): - """Converts a float to a decimal number, exactly. - - Note that Decimal.from_float(0.1) is not the same as Decimal('0.1'). - Since 0.1 is not exactly representable in binary floating point, the - value is stored as the nearest representable value which is - 0x1.999999999999ap-4. The exact equivalent of the value in decimal - is 0.1000000000000000055511151231257827021181583404541015625. 
- - >>> Decimal.from_float(0.1) - Decimal('0.1000000000000000055511151231257827021181583404541015625') - >>> Decimal.from_float(float('nan')) - Decimal('NaN') - >>> Decimal.from_float(float('inf')) - Decimal('Infinity') - >>> Decimal.from_float(-float('inf')) - Decimal('-Infinity') - >>> Decimal.from_float(-0.0) - Decimal('-0') - - """ - import math as _math - from decimal import _dec_from_triple # only available on Py2.6 and Py2.7 (not 3.3) - - if isinstance(f, (int, long)): # handle integer inputs - return Decimal(f) - if _math.isinf(f) or _math.isnan(f): # raises TypeError if not a float - return Decimal(repr(f)) - if _math.copysign(1.0, f) == 1.0: - sign = 0 - else: - sign = 1 - n, d = abs(f).as_integer_ratio() - # int.bit_length() method doesn't exist on Py2.6: - def bit_length(d): - if d != 0: - return len(bin(abs(d))) - 2 - else: - return 0 - k = bit_length(d) - 1 - result = _dec_from_triple(sign, str(n*5**k), -k) - return result - - -__all__ = ['newround'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newsuper.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newsuper.py deleted file mode 100755 index 5d3402b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/builtins/newsuper.py +++ /dev/null @@ -1,114 +0,0 @@ -''' -This module provides a newsuper() function in Python 2 that mimics the -behaviour of super() in Python 3. It is designed to be used as follows: - - from __future__ import division, absolute_import, print_function - from future.builtins import super - -And then, for example: - - class VerboseList(list): - def append(self, item): - print('Adding an item') - super().append(item) # new simpler super() function - -Importing this module on Python 3 has no effect. - -This is based on (i.e. almost identical to) Ryan Kelly's magicsuper -module here: - - https://github.com/rfk/magicsuper.git - -Excerpts from Ryan's docstring: - - "Of course, you can still explicitly pass in the arguments if you want - to do something strange. Sometimes you really do want that, e.g. to - skip over some classes in the method resolution order. - - "How does it work? By inspecting the calling frame to determine the - function object being executed and the object on which it's being - called, and then walking the object's __mro__ chain to find out where - that function was defined. Yuck, but it seems to work..." -''' - -from __future__ import absolute_import -import sys -from types import FunctionType - -from future.utils import PY3, PY26 - - -_builtin_super = super - -_SENTINEL = object() - -def newsuper(typ=_SENTINEL, type_or_obj=_SENTINEL, framedepth=1): - '''Like builtin super(), but capable of magic. - - This acts just like the builtin super() function, but if called - without any arguments it attempts to infer them at runtime. - ''' - # Infer the correct call if used without arguments. - if typ is _SENTINEL: - # We'll need to do some frame hacking. - f = sys._getframe(framedepth) - - try: - # Get the function's first positional argument. - type_or_obj = f.f_locals[f.f_code.co_varnames[0]] - except (IndexError, KeyError,): - raise RuntimeError('super() used in a function with no args') - - try: - # Get the MRO so we can crawl it. - mro = type_or_obj.__mro__ - except (AttributeError, RuntimeError): # see issue #160 - try: - mro = type_or_obj.__class__.__mro__ - except AttributeError: - raise RuntimeError('super() used with a non-newstyle class') - - # A ``for...else`` block? Yes! 
It's odd, but useful. - # If unfamiliar with for...else, see: - # - # http://psung.blogspot.com/2007/12/for-else-in-python.html - for typ in mro: - # Find the class that owns the currently-executing method. - for meth in typ.__dict__.values(): - # Drill down through any wrappers to the underlying func. - # This handles e.g. classmethod() and staticmethod(). - try: - while not isinstance(meth,FunctionType): - if isinstance(meth, property): - # Calling __get__ on the property will invoke - # user code which might throw exceptions or have - # side effects - meth = meth.fget - else: - try: - meth = meth.__func__ - except AttributeError: - meth = meth.__get__(type_or_obj, typ) - except (AttributeError, TypeError): - continue - if meth.func_code is f.f_code: - break # Aha! Found you. - else: - continue # Not found! Move onto the next class in MRO. - break # Found! Break out of the search loop. - else: - raise RuntimeError('super() called outside a method') - - # Dispatch to builtin super(). - if type_or_obj is not _SENTINEL: - return _builtin_super(typ, type_or_obj) - return _builtin_super(typ) - - -def superm(*args, **kwds): - f = sys._getframe(1) - nm = f.f_code.co_name - return getattr(newsuper(framedepth=2),nm)(*args, **kwds) - - -__all__ = ['newsuper'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/__init__.py deleted file mode 100755 index 0cd60d3..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# future.moves package -from __future__ import absolute_import -import sys -__future_module__ = True -from future.standard_library import import_top_level_modules - -if sys.version_info[0] >= 3: - import_top_level_modules() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_dummy_thread.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_dummy_thread.py deleted file mode 100755 index 688d249..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_dummy_thread.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from _dummy_thread import * -else: - __future_module__ = True - from dummy_thread import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_markupbase.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_markupbase.py deleted file mode 100755 index f9fb4bb..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_markupbase.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from _markupbase import * -else: - __future_module__ = True - from markupbase import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_thread.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_thread.py deleted file mode 100755 index c68018b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/_thread.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from _thread import * -else: - __future_module__ = True - from thread import * diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/builtins.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/builtins.py deleted file mode 100755 index e4b6221..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/builtins.py +++ /dev/null @@ -1,10 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from builtins import * -else: - __future_module__ = True - from __builtin__ import * - # Overwrite any old definitions with the equivalent future.builtins ones: - from future.builtins import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/collections.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/collections.py deleted file mode 100755 index 664ee6a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/collections.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import absolute_import -import sys - -from future.utils import PY2, PY26 -__future_module__ = True - -from collections import * - -if PY2: - from UserDict import UserDict - from UserList import UserList - from UserString import UserString - -if PY26: - from future.backports.misc import OrderedDict, Counter - -if sys.version_info < (3, 3): - from future.backports.misc import ChainMap, _count_elements diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/configparser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/configparser.py deleted file mode 100755 index 33d9cf9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/configparser.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY2 - -if PY2: - from ConfigParser import * -else: - from configparser import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/copyreg.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/copyreg.py deleted file mode 100755 index 9d08cdc..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/copyreg.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - import copyreg, sys - # A "*" import uses Python 3's copyreg.__all__ which does not include - # all public names in the API surface for copyreg; this avoids that - # problem by just making our module _be_ a reference to the actual module. - sys.modules['future.moves.copyreg'] = copyreg -else: - __future_module__ = True - from copy_reg import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/__init__.py deleted file mode 100755 index 626b406..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from dbm import * -else: - __future_module__ = True - from whichdb import * - from anydbm import * - -# Py3.3's dbm/__init__.py imports ndbm but doesn't expose it via __all__.
-# In case some (badly written) code depends on dbm.ndbm after import dbm, -# we simulate this: -if PY3: - from dbm import ndbm -else: - try: - from future.moves.dbm import ndbm - except ImportError: - ndbm = None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/dumb.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/dumb.py deleted file mode 100755 index 528383f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/dumb.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from dbm.dumb import * -else: - __future_module__ = True - from dumbdbm import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/gnu.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/gnu.py deleted file mode 100755 index 68ccf67..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/gnu.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from dbm.gnu import * -else: - __future_module__ = True - from gdbm import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/ndbm.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/ndbm.py deleted file mode 100755 index 8c6fff8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/dbm/ndbm.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from dbm.ndbm import * -else: - __future_module__ = True - from dbm import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/__init__.py deleted file mode 100755 index 22ed6e7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 -__future_module__ = True - -if PY3: - from html import * -else: - # cgi.escape isn't good enough for the single Py3.3 html test to pass. - # Define it inline here instead. From the Py3.4 stdlib. Note that the - # html.escape() function from the Py3.3 stdlib is not suitable for use on - # Py2.x. - """ - General functions for HTML manipulation. - """ - - def escape(s, quote=True): - """ - Replace special characters "&", "<" and ">" with HTML-safe sequences. - If the optional flag quote is true (the default), the quotation mark - characters, both double quote (") and single quote ('), are also - translated. - """ - s = s.replace("&", "&amp;") # Must be done first! - s = s.replace("<", "&lt;") - s = s.replace(">", "&gt;") - if quote: - s = s.replace('"', "&quot;") - s = s.replace('\'', "&#x27;") - return s - - __all__ = ['escape'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/entities.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/entities.py deleted file mode 100755 index 56a8860..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/entities.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from html.entities import * -else: - __future_module__ = True - from htmlentitydefs import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/parser.py deleted file mode 100755 index a6115b5..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/html/parser.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 -__future_module__ = True - -if PY3: - from html.parser import * -else: - from HTMLParser import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/__init__.py deleted file mode 100755 index 917b3d7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from future.utils import PY3 - -if not PY3: - __future_module__ = True diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/client.py deleted file mode 100755 index 55f9c9c..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/client.py +++ /dev/null @@ -1,8 +0,0 @@ -from future.utils import PY3 - -if PY3: - from http.client import * -else: - from httplib import * - from httplib import HTTPMessage - __future_module__ = True diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookiejar.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookiejar.py deleted file mode 100755 index ea00df7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookiejar.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from http.cookiejar import * -else: - __future_module__ = True - from cookielib import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookies.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookies.py deleted file mode 100755 index 1b74fe2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/cookies.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from http.cookies import * -else: - __future_module__ = True - from Cookie import * - from Cookie import Morsel # left out of __all__ on Py2.7!
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/server.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/server.py deleted file mode 100755 index 4e75cc1..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/http/server.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from http.server import * -else: - __future_module__ = True - from BaseHTTPServer import * - from CGIHTTPServer import * - from SimpleHTTPServer import * - try: - from CGIHTTPServer import _url_collapse_path # needed for a test - except ImportError: - try: - # Python 2.7.0 to 2.7.3 - from CGIHTTPServer import ( - _url_collapse_path_split as _url_collapse_path) - except ImportError: - # Doesn't exist on Python 2.6.x. Ignore it. - pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/itertools.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/itertools.py deleted file mode 100755 index e5eb20d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/itertools.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import - -from itertools import * -try: - zip_longest = izip_longest - filterfalse = ifilterfalse -except NameError: - pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/pickle.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/pickle.py deleted file mode 100755 index c53d693..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/pickle.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from pickle import * -else: - __future_module__ = True - try: - from cPickle import * - except ImportError: - from pickle import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/queue.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/queue.py deleted file mode 100755 index 1cb1437..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/queue.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from queue import * -else: - __future_module__ = True - from Queue import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/reprlib.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/reprlib.py deleted file mode 100755 index a313a13..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/reprlib.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from reprlib import * -else: - __future_module__ = True - from repr import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/socketserver.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/socketserver.py deleted file mode 100755 index 062e084..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/socketserver.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from socketserver import * -else: - __future_module__ = 
True - from SocketServer import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/subprocess.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/subprocess.py deleted file mode 100755 index 43ffd2a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/subprocess.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY2, PY26 - -from subprocess import * - -if PY2: - __future_module__ = True - from commands import getoutput, getstatusoutput - -if PY26: - from future.backports.misc import check_output diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/sys.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/sys.py deleted file mode 100755 index 1293bcb..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/sys.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY2 - -from sys import * - -if PY2: - from __builtin__ import intern diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/__init__.py deleted file mode 100755 index e408296..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 -__future_module__ = True - -if not PY3: - from Tkinter import * - from Tkinter import (_cnfmerge, _default_root, _flatten, - _support_default_root, _test, - _tkinter, _setit) - - try: # >= 2.7.4 - from Tkinter import (_join) - except ImportError: - pass - - try: # >= 2.7.4 - from Tkinter import (_stringify) - except ImportError: - pass - - try: # >= 2.7.9 - from Tkinter import (_splitdict) - except ImportError: - pass - -else: - from tkinter import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/colorchooser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/colorchooser.py deleted file mode 100755 index 6dde6e8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/colorchooser.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.colorchooser import * -else: - try: - from tkColorChooser import * - except ImportError: - raise ImportError('The tkColorChooser module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/commondialog.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/commondialog.py deleted file mode 100755 index eb7ae8d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/commondialog.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.commondialog import * -else: - try: - from tkCommonDialog import * - except ImportError: - raise ImportError('The tkCommonDialog module is missing. 
Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/constants.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/constants.py deleted file mode 100755 index ffe0981..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/constants.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.constants import * -else: - try: - from Tkconstants import * - except ImportError: - raise ImportError('The Tkconstants module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dialog.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dialog.py deleted file mode 100755 index 113370c..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dialog.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.dialog import * -else: - try: - from Dialog import * - except ImportError: - raise ImportError('The Dialog module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dnd.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dnd.py deleted file mode 100755 index 1ab4379..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/dnd.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.dnd import * -else: - try: - from Tkdnd import * - except ImportError: - raise ImportError('The Tkdnd module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/filedialog.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/filedialog.py deleted file mode 100755 index 973923e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/filedialog.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.filedialog import * -else: - try: - from FileDialog import * - except ImportError: - raise ImportError('The FileDialog module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/font.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/font.py deleted file mode 100755 index 628f399..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/font.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.font import * -else: - try: - from tkFont import * - except ImportError: - raise ImportError('The tkFont module is missing. 
Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/messagebox.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/messagebox.py deleted file mode 100755 index b43d870..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/messagebox.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.messagebox import * -else: - try: - from tkMessageBox import * - except ImportError: - raise ImportError('The tkMessageBox module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/scrolledtext.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/scrolledtext.py deleted file mode 100755 index 1c69db6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/scrolledtext.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.scrolledtext import * -else: - try: - from ScrolledText import * - except ImportError: - raise ImportError('The ScrolledText module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/simpledialog.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/simpledialog.py deleted file mode 100755 index dba93fb..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/simpledialog.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.simpledialog import * -else: - try: - from SimpleDialog import * - except ImportError: - raise ImportError('The SimpleDialog module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/tix.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/tix.py deleted file mode 100755 index 8d1718a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/tix.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.tix import * -else: - try: - from Tix import * - except ImportError: - raise ImportError('The Tix module is missing. Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/ttk.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/ttk.py deleted file mode 100755 index 081c1b4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/tkinter/ttk.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import - -from future.utils import PY3 - -if PY3: - from tkinter.ttk import * -else: - try: - from ttk import * - except ImportError: - raise ImportError('The ttk module is missing. 
Does your Py2 ' - 'installation include tkinter?') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/__init__.py deleted file mode 100755 index 5cf428b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if not PY3: - __future_module__ = True diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/error.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/error.py deleted file mode 100755 index 7d8ada7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/error.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import absolute_import -from future.standard_library import suspend_hooks - -from future.utils import PY3 - -if PY3: - from urllib.error import * -else: - __future_module__ = True - - # We use this method to get at the original Py2 urllib before any renaming magic - # ContentTooShortError = sys.py2_modules['urllib'].ContentTooShortError - - with suspend_hooks(): - from urllib import ContentTooShortError - from urllib2 import URLError, HTTPError diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/parse.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/parse.py deleted file mode 100755 index 9074b81..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/parse.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import absolute_import -from future.standard_library import suspend_hooks - -from future.utils import PY3 - -if PY3: - from urllib.parse import * -else: - __future_module__ = True - from urlparse import (ParseResult, SplitResult, parse_qs, parse_qsl, - urldefrag, urljoin, urlparse, urlsplit, - urlunparse, urlunsplit) - - # we use this method to get at the original py2 urllib before any renaming - # quote = sys.py2_modules['urllib'].quote - # quote_plus = sys.py2_modules['urllib'].quote_plus - # unquote = sys.py2_modules['urllib'].unquote - # unquote_plus = sys.py2_modules['urllib'].unquote_plus - # urlencode = sys.py2_modules['urllib'].urlencode - # splitquery = sys.py2_modules['urllib'].splitquery - - with suspend_hooks(): - from urllib import (quote, - quote_plus, - unquote, - unquote_plus, - urlencode, - splitquery) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/request.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/request.py deleted file mode 100755 index 972aa4a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/request.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import absolute_import - -from future.standard_library import suspend_hooks -from future.utils import PY3 - -if PY3: - from urllib.request import * - # These aren't in __all__: - from urllib.request import (getproxies, - pathname2url, - proxy_bypass, - quote, - request_host, - thishost, - unquote, - url2pathname, - urlcleanup, - urljoin, - urlopen, - urlparse, - urlretrieve, - urlsplit, - urlunparse) - - from urllib.parse import (splitattr, - splithost, - splitpasswd, - splitport, - splitquery, - splittag, - splittype, - 
splituser, - splitvalue, - to_bytes, - unwrap) -else: - __future_module__ = True - with suspend_hooks(): - from urllib import * - from urllib2 import * - from urlparse import * - - # Rename: - from urllib import toBytes # missing from __all__ on Py2.6 - to_bytes = toBytes - - # from urllib import (pathname2url, - # url2pathname, - # getproxies, - # urlretrieve, - # urlcleanup, - # URLopener, - # FancyURLopener, - # proxy_bypass) - - # from urllib2 import ( - # AbstractBasicAuthHandler, - # AbstractDigestAuthHandler, - # BaseHandler, - # CacheFTPHandler, - # FileHandler, - # FTPHandler, - # HTTPBasicAuthHandler, - # HTTPCookieProcessor, - # HTTPDefaultErrorHandler, - # HTTPDigestAuthHandler, - # HTTPErrorProcessor, - # HTTPHandler, - # HTTPPasswordMgr, - # HTTPPasswordMgrWithDefaultRealm, - # HTTPRedirectHandler, - # HTTPSHandler, - # URLError, - # build_opener, - # install_opener, - # OpenerDirector, - # ProxyBasicAuthHandler, - # ProxyDigestAuthHandler, - # ProxyHandler, - # Request, - # UnknownHandler, - # urlopen, - # ) - - # from urlparse import ( - # urldefrag - # urljoin, - # urlparse, - # urlunparse, - # urlsplit, - # urlunsplit, - # parse_qs, - # parse_q" - # ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/response.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/response.py deleted file mode 100755 index a287ae2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/response.py +++ /dev/null @@ -1,12 +0,0 @@ -from future import standard_library -from future.utils import PY3 - -if PY3: - from urllib.response import * -else: - __future_module__ = True - with standard_library.suspend_hooks(): - from urllib import (addbase, - addclosehook, - addinfo, - addinfourl) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/robotparser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/robotparser.py deleted file mode 100755 index 0dc8f57..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/urllib/robotparser.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from urllib.robotparser import * -else: - __future_module__ = True - from robotparser import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/winreg.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/winreg.py deleted file mode 100755 index c8b1475..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/winreg.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from winreg import * -else: - __future_module__ = True - from _winreg import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/client.py deleted file mode 100755 index 4708cf8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/client.py +++ /dev/null @@ -1,7 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from xmlrpc.client import * -else: - from xmlrpclib import * diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/server.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/server.py deleted file mode 100755 index 1a8af34..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/server.py +++ /dev/null @@ -1,7 +0,0 @@ -from __future__ import absolute_import -from future.utils import PY3 - -if PY3: - from xmlrpc.server import * -else: - from SimpleXMLRPCServer import * diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/standard_library/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/standard_library/__init__.py deleted file mode 100755 index cff02f9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/standard_library/__init__.py +++ /dev/null @@ -1,815 +0,0 @@ -""" -Python 3 reorganized the standard library (PEP 3108). This module exposes -several standard library modules to Python 2 under their new Python 3 -names. - -It is designed to be used as follows:: - - from future import standard_library - standard_library.install_aliases() - -And then these normal Py3 imports work on both Py3 and Py2:: - - import builtins - import copyreg - import queue - import reprlib - import socketserver - import winreg # on Windows only - import test.support - import html, html.parser, html.entities - import http, http.client, http.server - import http.cookies, http.cookiejar - import urllib.parse, urllib.request, urllib.response, urllib.error, urllib.robotparser - import xmlrpc.client, xmlrpc.server - - import _thread - import _dummy_thread - import _markupbase - - from itertools import filterfalse, zip_longest - from sys import intern - from collections import UserDict, UserList, UserString - from collections import OrderedDict, Counter, ChainMap # even on Py2.6 - from subprocess import getoutput, getstatusoutput - from subprocess import check_output # even on Py2.6 - -(The renamed modules and functions are still available under their old -names on Python 2.) - -This is a cleaner alternative to this idiom (see -http://docs.pythonsprints.com/python3_porting/py-porting.html):: - - try: - import queue - except ImportError: - import Queue as queue - - -Limitations ----------- -We don't currently support these modules, but would like to:: - - import dbm - import dbm.dumb - import dbm.gnu - import collections.abc # on Py33 - import pickle # should (optionally) bring in cPickle on Python 2 - -""" - -from __future__ import absolute_import, division, print_function - -import sys -import logging -import imp -import contextlib -import types -import copy -import os - -# Make a dedicated logger; leave the root logger to be configured -# by the application. -flog = logging.getLogger('future_stdlib') -_formatter = logging.Formatter(logging.BASIC_FORMAT) -_handler = logging.StreamHandler() -_handler.setFormatter(_formatter) -flog.addHandler(_handler) -flog.setLevel(logging.WARN) - -from future.utils import PY2, PY3 - -# The modules that are defined under the same names on Py3 but with -# different contents in a significant way (e.g.
submodules) are: -# pickle (fast one) -# dbm -# urllib -# test -# email - -REPLACED_MODULES = set(['test', 'urllib', 'pickle', 'dbm']) # add email and dbm when we support it - -# The following module names are not present in Python 2.x, so they cause no -# potential clashes between the old and new names: -# http -# html -# tkinter -# xmlrpc -# Keys: Py2 / real module names -# Values: Py3 / simulated module names -RENAMES = { - # 'cStringIO': 'io', # there's a new io module in Python 2.6 - # that provides StringIO and BytesIO - # 'StringIO': 'io', # ditto - # 'cPickle': 'pickle', - '__builtin__': 'builtins', - 'copy_reg': 'copyreg', - 'Queue': 'queue', - 'future.moves.socketserver': 'socketserver', - 'ConfigParser': 'configparser', - 'repr': 'reprlib', - # 'FileDialog': 'tkinter.filedialog', - # 'tkFileDialog': 'tkinter.filedialog', - # 'SimpleDialog': 'tkinter.simpledialog', - # 'tkSimpleDialog': 'tkinter.simpledialog', - # 'tkColorChooser': 'tkinter.colorchooser', - # 'tkCommonDialog': 'tkinter.commondialog', - # 'Dialog': 'tkinter.dialog', - # 'Tkdnd': 'tkinter.dnd', - # 'tkFont': 'tkinter.font', - # 'tkMessageBox': 'tkinter.messagebox', - # 'ScrolledText': 'tkinter.scrolledtext', - # 'Tkconstants': 'tkinter.constants', - # 'Tix': 'tkinter.tix', - # 'ttk': 'tkinter.ttk', - # 'Tkinter': 'tkinter', - '_winreg': 'winreg', - 'thread': '_thread', - 'dummy_thread': '_dummy_thread', - # 'anydbm': 'dbm', # causes infinite import loop - # 'whichdb': 'dbm', # causes infinite import loop - # anydbm and whichdb are handled by fix_imports2 - # 'dbhash': 'dbm.bsd', - # 'dumbdbm': 'dbm.dumb', - # 'dbm': 'dbm.ndbm', - # 'gdbm': 'dbm.gnu', - 'future.moves.xmlrpc': 'xmlrpc', - # 'future.backports.email': 'email', # for use by urllib - # 'DocXMLRPCServer': 'xmlrpc.server', - # 'SimpleXMLRPCServer': 'xmlrpc.server', - # 'httplib': 'http.client', - # 'htmlentitydefs' : 'html.entities', - # 'HTMLParser' : 'html.parser', - # 'Cookie': 'http.cookies', - # 'cookielib': 'http.cookiejar', - # 'BaseHTTPServer': 'http.server', - # 'SimpleHTTPServer': 'http.server', - # 'CGIHTTPServer': 'http.server', - # 'future.backports.test': 'test', # primarily for renaming test_support to support - # 'commands': 'subprocess', - # 'urlparse' : 'urllib.parse', - # 'robotparser' : 'urllib.robotparser', - # 'abc': 'collections.abc', # for Py33 - # 'future.utils.six.moves.html': 'html', - # 'future.utils.six.moves.http': 'http', - 'future.moves.html': 'html', - 'future.moves.http': 'http', - # 'future.backports.urllib': 'urllib', - # 'future.utils.six.moves.urllib': 'urllib', - 'future.moves._markupbase': '_markupbase', - } - - -# It is complicated and apparently brittle to mess around with the -# ``sys.modules`` cache in order to support "import urllib" meaning two -# different things (Py2.7 urllib and backported Py3.3-like urllib) in different -# contexts. So we require explicit imports for these modules. -assert len(set(RENAMES.values()) & set(REPLACED_MODULES)) == 0 - - -# Harmless renames that we can insert. 
-# These modules need names from elsewhere being added to them: -# subprocess: should provide getoutput and other fns from commands -# module but these fns are missing: getstatus, mk2arg, -# mkarg -# re: needs an ASCII constant that works compatibly with Py3 - -# etc: see lib2to3/fixes/fix_imports.py - -# (New module name, new object name, old module name, old object name) -MOVES = [('collections', 'UserList', 'UserList', 'UserList'), - ('collections', 'UserDict', 'UserDict', 'UserDict'), - ('collections', 'UserString','UserString', 'UserString'), - ('collections', 'ChainMap', 'future.backports.misc', 'ChainMap'), - ('itertools', 'filterfalse','itertools', 'ifilterfalse'), - ('itertools', 'zip_longest','itertools', 'izip_longest'), - ('sys', 'intern','__builtin__', 'intern'), - # The re module has no ASCII flag in Py2, but this is the default. - # Set re.ASCII to a zero constant. stat.ST_MODE just happens to be one - # (and it exists on Py2.6+). - ('re', 'ASCII','stat', 'ST_MODE'), - ('base64', 'encodebytes','base64', 'encodestring'), - ('base64', 'decodebytes','base64', 'decodestring'), - ('subprocess', 'getoutput', 'commands', 'getoutput'), - ('subprocess', 'getstatusoutput', 'commands', 'getstatusoutput'), - ('subprocess', 'check_output', 'future.backports.misc', 'check_output'), - ('math', 'ceil', 'future.backports.misc', 'ceil'), - ('collections', 'OrderedDict', 'future.backports.misc', 'OrderedDict'), - ('collections', 'Counter', 'future.backports.misc', 'Counter'), - ('collections', 'ChainMap', 'future.backports.misc', 'ChainMap'), - ('itertools', 'count', 'future.backports.misc', 'count'), - ('reprlib', 'recursive_repr', 'future.backports.misc', 'recursive_repr'), - ('functools', 'cmp_to_key', 'future.backports.misc', 'cmp_to_key'), - -# This is no use, since "import urllib.request" etc. still fails: -# ('urllib', 'error', 'future.moves.urllib', 'error'), -# ('urllib', 'parse', 'future.moves.urllib', 'parse'), -# ('urllib', 'request', 'future.moves.urllib', 'request'), -# ('urllib', 'response', 'future.moves.urllib', 'response'), -# ('urllib', 'robotparser', 'future.moves.urllib', 'robotparser'), - ] - - -# A minimal example of an import hook: -# class WarnOnImport(object): -# def __init__(self, *args): -# self.module_names = args -# -# def find_module(self, fullname, path=None): -# if fullname in self.module_names: -# self.path = path -# return self -# return None -# -# def load_module(self, name): -# if name in sys.modules: -# return sys.modules[name] -# module_info = imp.find_module(name, self.path) -# module = imp.load_module(name, *module_info) -# sys.modules[name] = module -# flog.warning("Imported deprecated module %s", name) -# return module - - -class RenameImport(object): - """ - A class for import hooks mapping Py3 module names etc. to the Py2 equivalents. - """ - # Different RenameImport classes are created when importing this module from - # different source files. This causes isinstance(hook, RenameImport) checks - # to produce inconsistent results. We add this RENAMER attribute here so - # remove_hooks() and install_hooks() can find instances of these classes - # easily: - RENAMER = True - - def __init__(self, old_to_new): - ''' - Pass in a dictionary-like object mapping from old names to new - names. E.g. 
{'ConfigParser': 'configparser', 'cPickle': 'pickle'} - ''' - self.old_to_new = old_to_new - both = set(old_to_new.keys()) & set(old_to_new.values()) - assert (len(both) == 0 and - len(set(old_to_new.values())) == len(old_to_new.values())), \ - 'Ambiguity in renaming (handler not implemented)' - self.new_to_old = dict((new, old) for (old, new) in old_to_new.items()) - - def find_module(self, fullname, path=None): - # Handles hierarchical importing: package.module.module2 - new_base_names = set([s.split('.')[0] for s in self.new_to_old]) - # Before v0.12: Was: if fullname in set(self.old_to_new) | new_base_names: - if fullname in new_base_names: - return self - return None - - def load_module(self, name): - path = None - if name in sys.modules: - return sys.modules[name] - elif name in self.new_to_old: - # New name. Look up the corresponding old (Py2) name: - oldname = self.new_to_old[name] - module = self._find_and_load_module(oldname) - # module.__future_module__ = True - else: - module = self._find_and_load_module(name) - # In any case, make it available under the requested (Py3) name - sys.modules[name] = module - return module - - def _find_and_load_module(self, name, path=None): - """ - Finds and loads it. But if there's a . in the name, handles it - properly. - """ - bits = name.split('.') - while len(bits) > 1: - # Treat the first bit as a package - packagename = bits.pop(0) - package = self._find_and_load_module(packagename, path) - try: - path = package.__path__ - except AttributeError: - # This could be e.g. moves. - flog.debug('Package {0} has no __path__.'.format(package)) - if name in sys.modules: - return sys.modules[name] - flog.debug('What to do here?') - - name = bits[0] - module_info = imp.find_module(name, path) - return imp.load_module(name, *module_info) - - -class hooks(object): - """ - Acts as a context manager. Saves the state of sys.modules and restores it - after the 'with' block. - - Use like this: - - >>> from future import standard_library - >>> with standard_library.hooks(): - ... import http.client - >>> import requests - - For this to work, http.client will be scrubbed from sys.modules after the - 'with' block. That way the modules imported in the 'with' block will - continue to be accessible in the current namespace but not from any - imported modules (like requests). - """ - def __enter__(self): - # flog.debug('Entering hooks context manager') - self.old_sys_modules = copy.copy(sys.modules) - self.hooks_were_installed = detect_hooks() - # self.scrubbed = scrub_py2_sys_modules() - install_hooks() - return self - - def __exit__(self, *args): - # flog.debug('Exiting hooks context manager') - # restore_sys_modules(self.scrubbed) - if not self.hooks_were_installed: - remove_hooks() - # scrub_future_sys_modules() - -# Sanity check for is_py2_stdlib_module(): We aren't replacing any -# builtin modules names: -if PY2: - assert len(set(RENAMES.values()) & set(sys.builtin_module_names)) == 0 - - -def is_py2_stdlib_module(m): - """ - Tries to infer whether the module m is from the Python 2 standard library. - This may not be reliable on all systems. - """ - if PY3: - return False - if not 'stdlib_path' in is_py2_stdlib_module.__dict__: - stdlib_files = [contextlib.__file__, os.__file__, copy.__file__] - stdlib_paths = [os.path.split(f)[0] for f in stdlib_files] - if not len(set(stdlib_paths)) == 1: - # This seems to happen on travis-ci.org. Very strange. We'll try to - # ignore it. 
- flog.warn('Multiple locations found for the Python standard ' - 'library: %s' % stdlib_paths) - # Choose the first one arbitrarily - is_py2_stdlib_module.stdlib_path = stdlib_paths[0] - - if m.__name__ in sys.builtin_module_names: - return True - - if hasattr(m, '__file__'): - modpath = os.path.split(m.__file__) - if (modpath[0].startswith(is_py2_stdlib_module.stdlib_path) and - 'site-packages' not in modpath[0]): - return True - - return False - - -def scrub_py2_sys_modules(): - """ - Removes any Python 2 standard library modules from ``sys.modules`` that - would interfere with Py3-style imports using import hooks. Examples are - modules with the same names (like urllib or email). - - (Note that currently import hooks are disabled for modules like these - with ambiguous names anyway ...) - """ - if PY3: - return {} - scrubbed = {} - for modulename in REPLACED_MODULES & set(RENAMES.keys()): - if not modulename in sys.modules: - continue - - module = sys.modules[modulename] - - if is_py2_stdlib_module(module): - flog.debug('Deleting (Py2) {} from sys.modules'.format(modulename)) - scrubbed[modulename] = sys.modules[modulename] - del sys.modules[modulename] - return scrubbed - - -def scrub_future_sys_modules(): - """ - Deprecated. - """ - return {} - -class suspend_hooks(object): - """ - Acts as a context manager. Use like this: - - >>> from future import standard_library - >>> standard_library.install_hooks() - >>> import http.client - >>> # ... - >>> with standard_library.suspend_hooks(): - >>> import requests # incompatible with ``future``'s standard library hooks - - If the hooks were disabled before the context, they are not installed when - the context is left. - """ - def __enter__(self): - self.hooks_were_installed = detect_hooks() - remove_hooks() - # self.scrubbed = scrub_future_sys_modules() - return self - - def __exit__(self, *args): - if self.hooks_were_installed: - install_hooks() - # restore_sys_modules(self.scrubbed) - - -def restore_sys_modules(scrubbed): - """ - Add any previously scrubbed modules back to the sys.modules cache, - but only if it's safe to do so. - """ - clash = set(sys.modules) & set(scrubbed) - if len(clash) != 0: - # If several, choose one arbitrarily to raise an exception about - first = list(clash)[0] - raise ImportError('future module {} clashes with Py2 module' - .format(first)) - sys.modules.update(scrubbed) - - -def install_aliases(): - """ - Monkey-patches the standard library in Py2.6/7 to provide - aliases for better Py3 compatibility. 
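-
-    A rough usage sketch (Py2 only; on Py3 this function simply returns).
-    After the call, several Py3-style names resolve on Py2:
-
-    >>> from future import standard_library
-    >>> standard_library.install_aliases()
-    >>> from subprocess import getoutput     # aliased from commands.getoutput
-    >>> from itertools import zip_longest    # aliased from izip_longest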
- """ - if PY3: - return - # if hasattr(install_aliases, 'run_already'): - # return - for (newmodname, newobjname, oldmodname, oldobjname) in MOVES: - __import__(newmodname) - # We look up the module in sys.modules because __import__ just returns the - # top-level package: - newmod = sys.modules[newmodname] - # newmod.__future_module__ = True - - __import__(oldmodname) - oldmod = sys.modules[oldmodname] - - obj = getattr(oldmod, oldobjname) - setattr(newmod, newobjname, obj) - - # Hack for urllib so it appears to have the same structure on Py2 as on Py3 - import urllib - from future.backports.urllib import request - from future.backports.urllib import response - from future.backports.urllib import parse - from future.backports.urllib import error - from future.backports.urllib import robotparser - urllib.request = request - urllib.response = response - urllib.parse = parse - urllib.error = error - urllib.robotparser = robotparser - sys.modules['urllib.request'] = request - sys.modules['urllib.response'] = response - sys.modules['urllib.parse'] = parse - sys.modules['urllib.error'] = error - sys.modules['urllib.robotparser'] = robotparser - - # Patch the test module so it appears to have the same structure on Py2 as on Py3 - try: - import test - except ImportError: - pass - try: - from future.moves.test import support - except ImportError: - pass - else: - test.support = support - sys.modules['test.support'] = support - - # Patch the dbm module so it appears to have the same structure on Py2 as on Py3 - try: - import dbm - except ImportError: - pass - else: - from future.moves.dbm import dumb - dbm.dumb = dumb - sys.modules['dbm.dumb'] = dumb - try: - from future.moves.dbm import gnu - except ImportError: - pass - else: - dbm.gnu = gnu - sys.modules['dbm.gnu'] = gnu - try: - from future.moves.dbm import ndbm - except ImportError: - pass - else: - dbm.ndbm = ndbm - sys.modules['dbm.ndbm'] = ndbm - - # install_aliases.run_already = True - - -def install_hooks(): - """ - This function installs the future.standard_library import hook into - sys.meta_path. - """ - if PY3: - return - - install_aliases() - - flog.debug('sys.meta_path was: {0}'.format(sys.meta_path)) - flog.debug('Installing hooks ...') - - # Add it unless it's there already - newhook = RenameImport(RENAMES) - if not detect_hooks(): - sys.meta_path.append(newhook) - flog.debug('sys.meta_path is now: {0}'.format(sys.meta_path)) - - -def enable_hooks(): - """ - Deprecated. Use install_hooks() instead. This will be removed by - ``future`` v1.0. - """ - install_hooks() - - -def remove_hooks(scrub_sys_modules=False): - """ - This function removes the import hook from sys.meta_path. - """ - if PY3: - return - flog.debug('Uninstalling hooks ...') - # Loop backwards, so deleting items keeps the ordering: - for i, hook in list(enumerate(sys.meta_path))[::-1]: - if hasattr(hook, 'RENAMER'): - del sys.meta_path[i] - - # Explicit is better than implicit. In the future the interface should - # probably change so that scrubbing the import hooks requires a separate - # function call. Left as is for now for backward compatibility with - # v0.11.x. - if scrub_sys_modules: - scrub_future_sys_modules() - - -def disable_hooks(): - """ - Deprecated. Use remove_hooks() instead. This will be removed by - ``future`` v1.0. - """ - remove_hooks() - - -def detect_hooks(): - """ - Returns True if the import hooks are installed, False if not. 
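-
-    A minimal sketch of the expected behaviour on Py2 (assuming no other
-    renaming hooks are already on ``sys.meta_path``):
-
-    >>> from future import standard_library
-    >>> standard_library.install_hooks()
-    >>> standard_library.detect_hooks()
-    True
-    >>> standard_library.remove_hooks()
-    >>> standard_library.detect_hooks()
-    False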
- """ - flog.debug('Detecting hooks ...') - present = any([hasattr(hook, 'RENAMER') for hook in sys.meta_path]) - if present: - flog.debug('Detected.') - else: - flog.debug('Not detected.') - return present - - -# As of v0.12, this no longer happens implicitly: -# if not PY3: -# install_hooks() - - -if not hasattr(sys, 'py2_modules'): - sys.py2_modules = {} - -def cache_py2_modules(): - """ - Currently this function is unneeded, as we are not attempting to provide import hooks - for modules with ambiguous names: email, urllib, pickle. - """ - if len(sys.py2_modules) != 0: - return - assert not detect_hooks() - import urllib - sys.py2_modules['urllib'] = urllib - - import email - sys.py2_modules['email'] = email - - import pickle - sys.py2_modules['pickle'] = pickle - - # Not all Python installations have test module. (Anaconda doesn't, for example.) - # try: - # import test - # except ImportError: - # sys.py2_modules['test'] = None - # sys.py2_modules['test'] = test - - # import dbm - # sys.py2_modules['dbm'] = dbm - - -def import_(module_name, backport=False): - """ - Pass a (potentially dotted) module name of a Python 3 standard library - module. This function imports the module compatibly on Py2 and Py3 and - returns the top-level module. - - Example use: - >>> http = import_('http.client') - >>> http = import_('http.server') - >>> urllib = import_('urllib.request') - - Then: - >>> conn = http.client.HTTPConnection(...) - >>> response = urllib.request.urlopen('http://mywebsite.com') - >>> # etc. - - Use as follows: - >>> package_name = import_(module_name) - - On Py3, equivalent to this: - - >>> import module_name - - On Py2, equivalent to this if backport=False: - - >>> from future.moves import module_name - - or to this if backport=True: - - >>> from future.backports import module_name - - except that it also handles dotted module names such as ``http.client`` - The effect then is like this: - - >>> from future.backports import module - >>> from future.backports.module import submodule - >>> module.submodule = submodule - - Note that this would be a SyntaxError in Python: - - >>> from future.backports import http.client - - """ - # Python 2.6 doesn't have importlib in the stdlib, so it requires - # the backported ``importlib`` package from PyPI as a dependency to use - # this function: - import importlib - - if PY3: - return __import__(module_name) - else: - # client.blah = blah - # Then http.client = client - # etc. - if backport: - prefix = 'future.backports' - else: - prefix = 'future.moves' - parts = prefix.split('.') + module_name.split('.') - - modules = [] - for i, part in enumerate(parts): - sofar = '.'.join(parts[:i+1]) - modules.append(importlib.import_module(sofar)) - for i, part in reversed(list(enumerate(parts))): - if i == 0: - break - setattr(modules[i-1], part, modules[i]) - - # Return the next-most top-level module after future.backports / future.moves: - return modules[2] - - -def from_import(module_name, *symbol_names, **kwargs): - """ - Example use: - >>> HTTPConnection = from_import('http.client', 'HTTPConnection') - >>> HTTPServer = from_import('http.server', 'HTTPServer') - >>> urlopen, urlparse = from_import('urllib.request', 'urlopen', 'urlparse') - - Equivalent to this on Py3: - - >>> from module_name import symbol_names[0], symbol_names[1], ... - - and this on Py2: - - >>> from future.moves.module_name import symbol_names[0], ... - - or: - - >>> from future.backports.module_name import symbol_names[0], ... 
- - except that it also handles dotted module names such as ``http.client``. - """ - - if PY3: - return __import__(module_name) - else: - if 'backport' in kwargs and bool(kwargs['backport']): - prefix = 'future.backports' - else: - prefix = 'future.moves' - parts = prefix.split('.') + module_name.split('.') - module = importlib.import_module(prefix + '.' + module_name) - output = [getattr(module, name) for name in symbol_names] - if len(output) == 1: - return output[0] - else: - return output - - -class exclude_local_folder_imports(object): - """ - A context-manager that prevents standard library modules like configparser - from being imported from the local python-future source folder on Py3. - - (This was need prior to v0.16.0 because the presence of a configparser - folder would otherwise have prevented setuptools from running on Py3. Maybe - it's not needed any more?) - """ - def __init__(self, *args): - assert len(args) > 0 - self.module_names = args - # Disallow dotted module names like http.client: - if any(['.' in m for m in self.module_names]): - raise NotImplementedError('Dotted module names are not supported') - - def __enter__(self): - self.old_sys_path = copy.copy(sys.path) - self.old_sys_modules = copy.copy(sys.modules) - if sys.version_info[0] < 3: - return - # The presence of all these indicates we've found our source folder, - # because `builtins` won't have been installed in site-packages by setup.py: - FUTURE_SOURCE_SUBFOLDERS = ['future', 'past', 'libfuturize', 'libpasteurize', 'builtins'] - - # Look for the future source folder: - for folder in self.old_sys_path: - if all([os.path.exists(os.path.join(folder, subfolder)) - for subfolder in FUTURE_SOURCE_SUBFOLDERS]): - # Found it. Remove it. - sys.path.remove(folder) - - # Ensure we import the system module: - for m in self.module_names: - # Delete the module and any submodules from sys.modules: - # for key in list(sys.modules): - # if key == m or key.startswith(m + '.'): - # try: - # del sys.modules[key] - # except KeyError: - # pass - try: - module = __import__(m, level=0) - except ImportError: - # There's a problem importing the system module. E.g. the - # winreg module is not available except on Windows. - pass - - def __exit__(self, *args): - # Restore sys.path and sys.modules: - sys.path = self.old_sys_path - for m in set(self.old_sys_modules.keys()) - set(sys.modules.keys()): - sys.modules[m] = self.old_sys_modules[m] - -TOP_LEVEL_MODULES = ['builtins', - 'copyreg', - 'html', - 'http', - 'queue', - 'reprlib', - 'socketserver', - 'test', - 'tkinter', - 'winreg', - 'xmlrpc', - '_dummy_thread', - '_markupbase', - '_thread', - ] - -def import_top_level_modules(): - with exclude_local_folder_imports(*TOP_LEVEL_MODULES): - for m in TOP_LEVEL_MODULES: - try: - __import__(m) - except ImportError: # e.g. 
winreg - pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/tests/base.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/tests/base.py deleted file mode 100755 index 4ef437b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/tests/base.py +++ /dev/null @@ -1,539 +0,0 @@ -from __future__ import print_function, absolute_import -import os -import tempfile -import unittest -import sys -import re -import warnings -import io -from textwrap import dedent - -from future.utils import bind_method, PY26, PY3, PY2, PY27 -from future.moves.subprocess import check_output, STDOUT, CalledProcessError - -if PY26: - import unittest2 as unittest - - -def reformat_code(code): - """ - Removes any leading \n and dedents. - """ - if code.startswith('\n'): - code = code[1:] - return dedent(code) - - -def order_future_lines(code): - """ - Returns the code block with any ``__future__`` import lines sorted, and - then any ``future`` import lines sorted, then any ``builtins`` import lines - sorted. - - This only sorts the lines within the expected blocks. - - See test_order_future_lines() for an example. - """ - - # We need .splitlines(keepends=True), which doesn't exist on Py2, - # so we use this instead: - lines = code.split('\n') - - uufuture_line_numbers = [i for i, line in enumerate(lines) - if line.startswith('from __future__ import ')] - - future_line_numbers = [i for i, line in enumerate(lines) - if line.startswith('from future') - or line.startswith('from past')] - - builtins_line_numbers = [i for i, line in enumerate(lines) - if line.startswith('from builtins')] - - assert code.lstrip() == code, ('internal usage error: ' - 'dedent the code before calling order_future_lines()') - - def mymax(numbers): - return max(numbers) if len(numbers) > 0 else 0 - - def mymin(numbers): - return min(numbers) if len(numbers) > 0 else float('inf') - - assert mymax(uufuture_line_numbers) <= mymin(future_line_numbers), \ - 'the __future__ and future imports are out of order' - - # assert mymax(future_line_numbers) <= mymin(builtins_line_numbers), \ - # 'the future and builtins imports are out of order' - - uul = sorted([lines[i] for i in uufuture_line_numbers]) - sorted_uufuture_lines = dict(zip(uufuture_line_numbers, uul)) - - fl = sorted([lines[i] for i in future_line_numbers]) - sorted_future_lines = dict(zip(future_line_numbers, fl)) - - bl = sorted([lines[i] for i in builtins_line_numbers]) - sorted_builtins_lines = dict(zip(builtins_line_numbers, bl)) - - # Replace the old unsorted "from __future__ import ..." lines with the - # new sorted ones: - new_lines = [] - for i in range(len(lines)): - if i in uufuture_line_numbers: - new_lines.append(sorted_uufuture_lines[i]) - elif i in future_line_numbers: - new_lines.append(sorted_future_lines[i]) - elif i in builtins_line_numbers: - new_lines.append(sorted_builtins_lines[i]) - else: - new_lines.append(lines[i]) - return '\n'.join(new_lines) - - -class VerboseCalledProcessError(CalledProcessError): - """ - Like CalledProcessError, but it displays more information (message and - script output) for diagnosing test failures etc. 
- """ - def __init__(self, msg, returncode, cmd, output=None): - self.msg = msg - self.returncode = returncode - self.cmd = cmd - self.output = output - - def __str__(self): - return ("Command '%s' failed with exit status %d\nMessage: %s\nOutput: %s" - % (self.cmd, self.returncode, self.msg, self.output)) - -class FuturizeError(VerboseCalledProcessError): - pass - -class PasteurizeError(VerboseCalledProcessError): - pass - - -class CodeHandler(unittest.TestCase): - """ - Handy mixin for test classes for writing / reading / futurizing / - running .py files in the test suite. - """ - def setUp(self): - """ - The outputs from the various futurize stages should have the - following headers: - """ - # After stage1: - # TODO: use this form after implementing a fixer to consolidate - # __future__ imports into a single line: - # self.headers1 = """ - # from __future__ import absolute_import, division, print_function - # """ - self.headers1 = reformat_code(""" - from __future__ import absolute_import - from __future__ import division - from __future__ import print_function - """) - - # After stage2 --all-imports: - # TODO: use this form after implementing a fixer to consolidate - # __future__ imports into a single line: - # self.headers2 = """ - # from __future__ import (absolute_import, division, - # print_function, unicode_literals) - # from future import standard_library - # from future.builtins import * - # """ - self.headers2 = reformat_code(""" - from __future__ import absolute_import - from __future__ import division - from __future__ import print_function - from __future__ import unicode_literals - from future import standard_library - standard_library.install_aliases() - from builtins import * - """) - self.interpreters = [sys.executable] - self.tempdir = tempfile.mkdtemp() + os.path.sep - pypath = os.getenv('PYTHONPATH') - if pypath: - self.env = {'PYTHONPATH': os.getcwd() + os.pathsep + pypath} - else: - self.env = {'PYTHONPATH': os.getcwd()} - - def convert(self, code, stages=(1, 2), all_imports=False, from3=False, - reformat=True, run=True, conservative=False): - """ - Converts the code block using ``futurize`` and returns the - resulting code. - - Passing stages=[1] or stages=[2] passes the flag ``--stage1`` or - ``stage2`` to ``futurize``. Passing both stages runs ``futurize`` - with both stages by default. - - If from3 is False, runs ``futurize``, converting from Python 2 to - both 2 and 3. If from3 is True, runs ``pasteurize`` to convert - from Python 3 to both 2 and 3. - - Optionally reformats the code block first using the reformat() function. - - If run is True, runs the resulting code under all Python - interpreters in self.interpreters. - """ - if reformat: - code = reformat_code(code) - self._write_test_script(code) - self._futurize_test_script(stages=stages, all_imports=all_imports, - from3=from3, conservative=conservative) - output = self._read_test_script() - if run: - for interpreter in self.interpreters: - _ = self._run_test_script(interpreter=interpreter) - return output - - def compare(self, output, expected, ignore_imports=True): - """ - Compares whether the code blocks are equal. If not, raises an - exception so the test fails. Ignores any trailing whitespace like - blank lines. - - If ignore_imports is True, passes the code blocks into the - strip_future_imports method. - - If one code block is a unicode string and the other a - byte-string, it assumes the byte-string is encoded as utf-8. 
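-
-        For example (a sketch), ``compare(b'x = 1\n', u'x = 1\n')`` passes:
-        the byte-string side is decoded as utf-8 before the comparison.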
- """ - if ignore_imports: - output = self.strip_future_imports(output) - expected = self.strip_future_imports(expected) - if isinstance(output, bytes) and not isinstance(expected, bytes): - output = output.decode('utf-8') - if isinstance(expected, bytes) and not isinstance(output, bytes): - expected = expected.decode('utf-8') - self.assertEqual(order_future_lines(output.rstrip()), - expected.rstrip()) - - def strip_future_imports(self, code): - """ - Strips any of these import lines: - - from __future__ import - from future - from future. - from builtins - - or any line containing: - install_hooks() - or: - install_aliases() - - Limitation: doesn't handle imports split across multiple lines like - this: - - from __future__ import (absolute_import, division, print_function, - unicode_literals) - """ - output = [] - # We need .splitlines(keepends=True), which doesn't exist on Py2, - # so we use this instead: - for line in code.split('\n'): - if not (line.startswith('from __future__ import ') - or line.startswith('from future ') - or line.startswith('from builtins ') - or 'install_hooks()' in line - or 'install_aliases()' in line - # but don't match "from future_builtins" :) - or line.startswith('from future.')): - output.append(line) - return '\n'.join(output) - - def convert_check(self, before, expected, stages=(1, 2), all_imports=False, - ignore_imports=True, from3=False, run=True, - conservative=False): - """ - Convenience method that calls convert() and compare(). - - Reformats the code blocks automatically using the reformat_code() - function. - - If all_imports is passed, we add the appropriate import headers - for the stage(s) selected to the ``expected`` code-block, so they - needn't appear repeatedly in the test code. - - If ignore_imports is True, ignores the presence of any lines - beginning: - - from __future__ import ... - from future import ... - - for the purpose of the comparison. - """ - output = self.convert(before, stages=stages, all_imports=all_imports, - from3=from3, run=run, conservative=conservative) - if all_imports: - headers = self.headers2 if 2 in stages else self.headers1 - else: - headers = '' - - reformatted = reformat_code(expected) - if headers in reformatted: - headers = '' - - self.compare(output, headers + reformatted, - ignore_imports=ignore_imports) - - def unchanged(self, code, **kwargs): - """ - Convenience method to ensure the code is unchanged by the - futurize process. - """ - self.convert_check(code, code, **kwargs) - - def _write_test_script(self, code, filename='mytestscript.py'): - """ - Dedents the given code (a multiline string) and writes it out to - a file in a temporary folder like /tmp/tmpUDCn7x/mytestscript.py. 
- """ - if isinstance(code, bytes): - code = code.decode('utf-8') - # Be explicit about encoding the temp file as UTF-8 (issue #63): - with io.open(self.tempdir + filename, 'wt', encoding='utf-8') as f: - f.write(dedent(code)) - - def _read_test_script(self, filename='mytestscript.py'): - with io.open(self.tempdir + filename, 'rt', encoding='utf-8') as f: - newsource = f.read() - return newsource - - def _futurize_test_script(self, filename='mytestscript.py', stages=(1, 2), - all_imports=False, from3=False, - conservative=False): - params = [] - stages = list(stages) - if all_imports: - params.append('--all-imports') - if from3: - script = 'pasteurize.py' - else: - script = 'futurize.py' - if stages == [1]: - params.append('--stage1') - elif stages == [2]: - params.append('--stage2') - else: - assert stages == [1, 2] - if conservative: - params.append('--conservative') - # No extra params needed - - # Absolute file path: - fn = self.tempdir + filename - call_args = [sys.executable, script] + params + ['-w', fn] - try: - output = check_output(call_args, stderr=STDOUT, env=self.env) - except CalledProcessError as e: - with open(fn) as f: - msg = ( - 'Error running the command %s\n' - '%s\n' - 'Contents of file %s:\n' - '\n' - '%s') % ( - ' '.join(call_args), - 'env=%s' % self.env, - fn, - '----\n%s\n----' % f.read(), - ) - ErrorClass = (FuturizeError if 'futurize' in script else PasteurizeError) - - if not hasattr(e, 'output'): - # The attribute CalledProcessError.output doesn't exist on Py2.6 - e.output = None - raise ErrorClass(msg, e.returncode, e.cmd, output=e.output) - return output - - def _run_test_script(self, filename='mytestscript.py', - interpreter=sys.executable): - # Absolute file path: - fn = self.tempdir + filename - try: - output = check_output([interpreter, fn], - env=self.env, stderr=STDOUT) - except CalledProcessError as e: - with open(fn) as f: - msg = ( - 'Error running the command %s\n' - '%s\n' - 'Contents of file %s:\n' - '\n' - '%s') % ( - ' '.join([interpreter, fn]), - 'env=%s' % self.env, - fn, - '----\n%s\n----' % f.read(), - ) - if not hasattr(e, 'output'): - # The attribute CalledProcessError.output doesn't exist on Py2.6 - e.output = None - raise VerboseCalledProcessError(msg, e.returncode, e.cmd, output=e.output) - return output - - -# Decorator to skip some tests on Python 2.6 ... -skip26 = unittest.skipIf(PY26, "this test is known to fail on Py2.6") - - -def expectedFailurePY3(func): - if not PY3: - return func - return unittest.expectedFailure(func) - -def expectedFailurePY26(func): - if not PY26: - return func - return unittest.expectedFailure(func) - - -def expectedFailurePY27(func): - if not PY27: - return func - return unittest.expectedFailure(func) - - -def expectedFailurePY2(func): - if not PY2: - return func - return unittest.expectedFailure(func) - - -# Renamed in Py3.3: -if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - -# From Py3.3: -def assertRegex(self, text, expected_regex, msg=None): - """Fail the test unless the text matches the regular expression.""" - if isinstance(expected_regex, (str, unicode)): - assert expected_regex, "expected_regex must not be empty." 
- expected_regex = re.compile(expected_regex) - if not expected_regex.search(text): - msg = msg or "Regex didn't match" - msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text) - raise self.failureException(msg) - -if not hasattr(unittest.TestCase, 'assertRegex'): - bind_method(unittest.TestCase, 'assertRegex', assertRegex) - -class _AssertRaisesBaseContext(object): - - def __init__(self, expected, test_case, callable_obj=None, - expected_regex=None): - self.expected = expected - self.test_case = test_case - if callable_obj is not None: - try: - self.obj_name = callable_obj.__name__ - except AttributeError: - self.obj_name = str(callable_obj) - else: - self.obj_name = None - if isinstance(expected_regex, (bytes, str)): - expected_regex = re.compile(expected_regex) - self.expected_regex = expected_regex - self.msg = None - - def _raiseFailure(self, standardMsg): - msg = self.test_case._formatMessage(self.msg, standardMsg) - raise self.test_case.failureException(msg) - - def handle(self, name, callable_obj, args, kwargs): - """ - If callable_obj is None, assertRaises/Warns is being used as a - context manager, so check for a 'msg' kwarg and return self. - If callable_obj is not None, call it passing args and kwargs. - """ - if callable_obj is None: - self.msg = kwargs.pop('msg', None) - return self - with self: - callable_obj(*args, **kwargs) - -class _AssertWarnsContext(_AssertRaisesBaseContext): - """A context manager used to implement TestCase.assertWarns* methods.""" - - def __enter__(self): - # The __warningregistry__'s need to be in a pristine state for tests - # to work properly. - for v in sys.modules.values(): - if getattr(v, '__warningregistry__', None): - v.__warningregistry__ = {} - self.warnings_manager = warnings.catch_warnings(record=True) - self.warnings = self.warnings_manager.__enter__() - warnings.simplefilter("always", self.expected) - return self - - def __exit__(self, exc_type, exc_value, tb): - self.warnings_manager.__exit__(exc_type, exc_value, tb) - if exc_type is not None: - # let unexpected exceptions pass through - return - try: - exc_name = self.expected.__name__ - except AttributeError: - exc_name = str(self.expected) - first_matching = None - for m in self.warnings: - w = m.message - if not isinstance(w, self.expected): - continue - if first_matching is None: - first_matching = w - if (self.expected_regex is not None and - not self.expected_regex.search(str(w))): - continue - # store warning for later retrieval - self.warning = w - self.filename = m.filename - self.lineno = m.lineno - return - # Now we simply try to choose a helpful failure message - if first_matching is not None: - self._raiseFailure('"{}" does not match "{}"'.format( - self.expected_regex.pattern, str(first_matching))) - if self.obj_name: - self._raiseFailure("{} not triggered by {}".format(exc_name, - self.obj_name)) - else: - self._raiseFailure("{} not triggered".format(exc_name)) - - -def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs): - """Fail unless a warning of class warnClass is triggered - by callable_obj when invoked with arguments args and keyword - arguments kwargs. If a different type of warning is - triggered, it will not be handled: depending on the other - warning filtering rules in effect, it might be silenced, printed - out, or raised as an exception. 
-
-        If called with callable_obj omitted or None, will return a
-        context object used like this::
-
-             with self.assertWarns(SomeWarning):
-                 do_something()
-
-        An optional keyword argument 'msg' can be provided when assertWarns
-        is used as a context object.
-
-        The context manager keeps a reference to the first matching
-        warning as the 'warning' attribute; similarly, the 'filename'
-        and 'lineno' attributes give you information about the line
-        of Python code from which the warning was triggered.
-        This allows you to inspect the warning after the assertion::
-
-            with self.assertWarns(SomeWarning) as cm:
-                do_something()
-            the_warning = cm.warning
-            self.assertEqual(the_warning.some_attribute, 147)
-        """
-        context = _AssertWarnsContext(expected_warning, self, callable_obj)
-        return context.handle('assertWarns', callable_obj, args, kwargs)
-
-if not hasattr(unittest.TestCase, 'assertWarns'):
-    bind_method(unittest.TestCase, 'assertWarns', assertWarns)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/__init__.py
deleted file mode 100755
index 0625077..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/__init__.py
+++ /dev/null
@@ -1,257 +0,0 @@
-"""
-This module contains backports of the data types that were significantly
-changed in the transition from Python 2 to Python 3.
-
-- an implementation of Python 3's bytes object (pure Python subclass of
-  Python 2's builtin 8-bit str type)
-- an implementation of Python 3's str object (pure Python subclass of
-  Python 2's builtin unicode type)
-- a backport of the range iterator from Py3 with slicing support
-
-It is used as follows::
-
-    from __future__ import division, absolute_import, print_function
-    from builtins import bytes, dict, int, range, str
-
-to bring in the new semantics for these types from Python 3. And
-then, for example::
-
-    b = bytes(b'ABCD')
-    assert list(b) == [65, 66, 67, 68]
-    assert repr(b) == "b'ABCD'"
-    assert [65, 66] in b
-
-    # These raise TypeErrors:
-    # b + u'EFGH'
-    # b.split(u'B')
-    # bytes(b',').join([u'Fred', u'Bill'])
-
-
-    s = str(u'ABCD')
-
-    # These raise TypeErrors:
-    # s.join([b'Fred', b'Bill'])
-    # s.startswith(b'A')
-    # b'B' in s
-    # s.find(b'A')
-    # s.replace(u'A', b'a')
-
-    # This raises an AttributeError:
-    # s.decode('utf-8')
-
-    assert repr(s) == 'ABCD'  # consistent repr with Py3 (no u prefix)
-
-
-    for i in range(10**11)[:10]:
-        pass
-
-and::
-
-    class VerboseList(list):
-        def append(self, item):
-            print('Adding an item')
-            super().append(item)  # new simpler super() function
-
-For more information:
----------------------
-
-- future.types.newbytes
-- future.types.newdict
-- future.types.newint
-- future.types.newobject
-- future.types.newrange
-- future.types.newstr
-
-
-Notes
-=====
-
-range()
--------
-``range`` is a custom class that backports the slicing behaviour from
-Python 3 (based on the ``xrange`` module by Dan Crosta). See the
-``newrange`` module docstring for more details.
-
-
-super()
--------
-``super()`` is based on Ryan Kelly's ``magicsuper`` module. See the
-``newsuper`` module docstring for more details.
-
-
-round()
--------
-Python 3 modifies the behaviour of ``round()`` to use "Banker's Rounding".
-See http://stackoverflow.com/a/10825998. See the ``newround`` module
-docstring for more details.
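-
-For example, the backported ``round`` rounds ties to the nearest even value
-(a minimal sketch of the Py3 semantics described above)::
-
-    from builtins import round
-    assert round(2.5) == 2 and round(3.5) == 4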
- -""" - -from __future__ import absolute_import, division, print_function - -import functools -from numbers import Integral - -from future import utils - - -# Some utility functions to enforce strict type-separation of unicode str and -# bytes: -def disallow_types(argnums, disallowed_types): - """ - A decorator that raises a TypeError if any of the given numbered - arguments is of the corresponding given type (e.g. bytes or unicode - string). - - For example: - - @disallow_types([0, 1], [unicode, bytes]) - def f(a, b): - pass - - raises a TypeError when f is called if a unicode object is passed as - `a` or a bytes object is passed as `b`. - - This also skips over keyword arguments, so - - @disallow_types([0, 1], [unicode, bytes]) - def g(a, b=None): - pass - - doesn't raise an exception if g is called with only one argument a, - e.g.: - - g(b'Byte string') - - Example use: - - >>> class newbytes(object): - ... @disallow_types([1], [unicode]) - ... def __add__(self, other): - ... pass - - >>> newbytes('1234') + u'1234' #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError: can't concat 'bytes' to (unicode) str - """ - - def decorator(function): - - @functools.wraps(function) - def wrapper(*args, **kwargs): - # These imports are just for this decorator, and are defined here - # to prevent circular imports: - from .newbytes import newbytes - from .newint import newint - from .newstr import newstr - - errmsg = "argument can't be {0}" - for (argnum, mytype) in zip(argnums, disallowed_types): - # Handle the case where the type is passed as a string like 'newbytes'. - if isinstance(mytype, str) or isinstance(mytype, bytes): - mytype = locals()[mytype] - - # Only restrict kw args only if they are passed: - if len(args) <= argnum: - break - - # Here we use type() rather than isinstance() because - # __instancecheck__ is being overridden. E.g. - # isinstance(b'abc', newbytes) is True on Py2. - if type(args[argnum]) == mytype: - raise TypeError(errmsg.format(mytype)) - - return function(*args, **kwargs) - return wrapper - return decorator - - -def no(mytype, argnums=(1,)): - """ - A shortcut for the disallow_types decorator that disallows only one type - (in any position in argnums). - - Example use: - - >>> class newstr(object): - ... @no('bytes') - ... def __add__(self, other): - ... pass - - >>> newstr(u'1234') + b'1234' #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError: argument can't be bytes - - The object can also be passed directly, but passing the string helps - to prevent circular import problems. 
- """ - if isinstance(argnums, Integral): - argnums = (argnums,) - disallowed_types = [mytype] * len(argnums) - return disallow_types(argnums, disallowed_types) - - -def issubset(list1, list2): - """ - Examples: - - >>> issubset([], [65, 66, 67]) - True - >>> issubset([65], [65, 66, 67]) - True - >>> issubset([65, 66], [65, 66, 67]) - True - >>> issubset([65, 67], [65, 66, 67]) - False - """ - n = len(list1) - for startpos in range(len(list2) - n + 1): - if list2[startpos:startpos+n] == list1: - return True - return False - - -if utils.PY3: - import builtins - bytes = builtins.bytes - dict = builtins.dict - int = builtins.int - list = builtins.list - object = builtins.object - range = builtins.range - str = builtins.str - - # The identity mapping - newtypes = {bytes: bytes, - dict: dict, - int: int, - list: list, - object: object, - range: range, - str: str} - - __all__ = ['newtypes'] - -else: - - from .newbytes import newbytes - from .newdict import newdict - from .newint import newint - from .newlist import newlist - from .newrange import newrange - from .newobject import newobject - from .newstr import newstr - - newtypes = {bytes: newbytes, - dict: newdict, - int: newint, - long: newint, - list: newlist, - object: newobject, - range: newrange, - str: newbytes, - unicode: newstr} - - __all__ = ['newbytes', 'newdict', 'newint', 'newlist', 'newrange', 'newstr', 'newtypes'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newbytes.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newbytes.py deleted file mode 100755 index c9d584a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newbytes.py +++ /dev/null @@ -1,460 +0,0 @@ -""" -Pure-Python implementation of a Python 3-like bytes object for Python 2. - -Why do this? Without it, the Python 2 bytes object is a very, very -different beast to the Python 3 bytes object. -""" - -from numbers import Integral -import string -import copy - -from future.utils import istext, isbytes, PY2, PY3, with_metaclass -from future.types import no, issubset -from future.types.newobject import newobject - -if PY2: - from collections import Iterable -else: - from collections.abc import Iterable - - -_builtin_bytes = bytes - -if PY3: - # We'll probably never use newstr on Py3 anyway... - unicode = str - - -class BaseNewBytes(type): - def __instancecheck__(cls, instance): - if cls == newbytes: - return isinstance(instance, _builtin_bytes) - else: - return issubclass(instance.__class__, cls) - - -def _newchr(x): - if isinstance(x, str): # this happens on pypy - return x.encode('ascii') - else: - return chr(x) - - -class newbytes(with_metaclass(BaseNewBytes, _builtin_bytes)): - """ - A backport of the Python 3 bytes object to Py2 - """ - def __new__(cls, *args, **kwargs): - """ - From the Py3 bytes docstring: - - bytes(iterable_of_ints) -> bytes - bytes(string, encoding[, errors]) -> bytes - bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer - bytes(int) -> bytes object of size given by the parameter initialized with null bytes - bytes() -> empty bytes object - - Construct an immutable array of bytes from: - - an iterable yielding integers in range(256) - - a text string encoded using the specified encoding - - any object implementing the buffer API. 
- - an integer - """ - - encoding = None - errors = None - - if len(args) == 0: - return super(newbytes, cls).__new__(cls) - elif len(args) >= 2: - args = list(args) - if len(args) == 3: - errors = args.pop() - encoding=args.pop() - # Was: elif isinstance(args[0], newbytes): - # We use type() instead of the above because we're redefining - # this to be True for all unicode string subclasses. Warning: - # This may render newstr un-subclassable. - if type(args[0]) == newbytes: - # Special-case: for consistency with Py3.3, we return the same object - # (with the same id) if a newbytes object is passed into the - # newbytes constructor. - return args[0] - elif isinstance(args[0], _builtin_bytes): - value = args[0] - elif isinstance(args[0], unicode): - try: - if 'encoding' in kwargs: - assert encoding is None - encoding = kwargs['encoding'] - if 'errors' in kwargs: - assert errors is None - errors = kwargs['errors'] - except AssertionError: - raise TypeError('Argument given by name and position') - if encoding is None: - raise TypeError('unicode string argument without an encoding') - ### - # Was: value = args[0].encode(**kwargs) - # Python 2.6 string encode() method doesn't take kwargs: - # Use this instead: - newargs = [encoding] - if errors is not None: - newargs.append(errors) - value = args[0].encode(*newargs) - ### - elif hasattr(args[0], '__bytes__'): - value = args[0].__bytes__() - elif isinstance(args[0], Iterable): - if len(args[0]) == 0: - # This could be an empty list or tuple. Return b'' as on Py3. - value = b'' - else: - # Was: elif len(args[0])>0 and isinstance(args[0][0], Integral): - # # It's a list of integers - # But then we can't index into e.g. frozensets. Try to proceed - # anyway. - try: - value = bytearray([_newchr(x) for x in args[0]]) - except: - raise ValueError('bytes must be in range(0, 256)') - elif isinstance(args[0], Integral): - if args[0] < 0: - raise ValueError('negative count') - value = b'\x00' * args[0] - else: - value = args[0] - if type(value) == newbytes: - # Above we use type(...) rather than isinstance(...) because the - # newbytes metaclass overrides __instancecheck__. - # oldbytes(value) gives the wrong thing on Py2: the same - # result as str(value) on Py3, e.g. "b'abc'". (Issue #193). 
- # So we handle this case separately: - return copy.copy(value) - else: - return super(newbytes, cls).__new__(cls, value) - - def __repr__(self): - return 'b' + super(newbytes, self).__repr__() - - def __str__(self): - return 'b' + "'{0}'".format(super(newbytes, self).__str__()) - - def __getitem__(self, y): - value = super(newbytes, self).__getitem__(y) - if isinstance(y, Integral): - return ord(value) - else: - return newbytes(value) - - def __getslice__(self, *args): - return self.__getitem__(slice(*args)) - - def __contains__(self, key): - if isinstance(key, int): - newbyteskey = newbytes([key]) - # Don't use isinstance() here because we only want to catch - # newbytes, not Python 2 str: - elif type(key) == newbytes: - newbyteskey = key - else: - newbyteskey = newbytes(key) - return issubset(list(newbyteskey), list(self)) - - @no(unicode) - def __add__(self, other): - return newbytes(super(newbytes, self).__add__(other)) - - @no(unicode) - def __radd__(self, left): - return newbytes(left) + self - - @no(unicode) - def __mul__(self, other): - return newbytes(super(newbytes, self).__mul__(other)) - - @no(unicode) - def __rmul__(self, other): - return newbytes(super(newbytes, self).__rmul__(other)) - - def __mod__(self, vals): - if isinstance(vals, newbytes): - vals = _builtin_bytes.__str__(vals) - - elif isinstance(vals, tuple): - newvals = [] - for v in vals: - if isinstance(v, newbytes): - v = _builtin_bytes.__str__(v) - newvals.append(v) - vals = tuple(newvals) - - elif (hasattr(vals.__class__, '__getitem__') and - hasattr(vals.__class__, 'iteritems')): - for k, v in vals.iteritems(): - if isinstance(v, newbytes): - vals[k] = _builtin_bytes.__str__(v) - - return _builtin_bytes.__mod__(self, vals) - - def __imod__(self, other): - return self.__mod__(other) - - def join(self, iterable_of_bytes): - errmsg = 'sequence item {0}: expected bytes, {1} found' - if isbytes(iterable_of_bytes) or istext(iterable_of_bytes): - raise TypeError(errmsg.format(0, type(iterable_of_bytes))) - for i, item in enumerate(iterable_of_bytes): - if istext(item): - raise TypeError(errmsg.format(i, type(item))) - return newbytes(super(newbytes, self).join(iterable_of_bytes)) - - @classmethod - def fromhex(cls, string): - # Only on Py2: - return cls(string.replace(' ', '').decode('hex')) - - @no(unicode) - def find(self, sub, *args): - return super(newbytes, self).find(sub, *args) - - @no(unicode) - def rfind(self, sub, *args): - return super(newbytes, self).rfind(sub, *args) - - @no(unicode, (1, 2)) - def replace(self, old, new, *args): - return newbytes(super(newbytes, self).replace(old, new, *args)) - - def encode(self, *args): - raise AttributeError("encode method has been disabled in newbytes") - - def decode(self, encoding='utf-8', errors='strict'): - """ - Returns a newstr (i.e. unicode subclass) - - Decode B using the codec registered for encoding. Default encoding - is 'utf-8'. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' - as well as any other name registered with codecs.register_error that is - able to handle UnicodeDecodeErrors. - """ - # Py2 str.encode() takes encoding and errors as optional parameter, - # not keyword arguments as in Python 3 str. 
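-        # For example (a sketch): newbytes(b'caf\xc3\xa9').decode('utf-8')
-        # returns a newstr equal to u'caf\xe9'.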
-
-        from future.types.newstr import newstr
-
-        if errors == 'surrogateescape':
-            from future.utils.surrogateescape import register_surrogateescape
-            register_surrogateescape()
-
-        return newstr(super(newbytes, self).decode(encoding, errors))
-
-        # This is currently broken:
-        # # We implement surrogateescape error handling here in addition rather
-        # # than relying on the custom error handler from
-        # # future.utils.surrogateescape to be registered globally, even though
-        # # that is fine in the case of decoding. (But not encoding: see the
-        # # comments in newstr.encode()``.)
-        #
-        # if errors == 'surrogateescape':
-        #     # Decode char by char
-        #     mybytes = []
-        #     for code in self:
-        #         # Code is an int
-        #         if 0x80 <= code <= 0xFF:
-        #             b = 0xDC00 + code
-        #         elif code <= 0x7F:
-        #             b = _unichr(c).decode(encoding=encoding)
-        #         else:
-        #             # # It may be a bad byte
-        #             # FIXME: What to do in this case? See the Py3 docs / tests.
-        #             # # Try swallowing it.
-        #             # continue
-        #             # print("RAISE!")
-        #             raise NotASurrogateError
-        #         mybytes.append(b)
-        #     return newbytes(mybytes)
-        # return newbytes(super(newstr, self).decode(encoding, errors))
-
-    @no(unicode)
-    def startswith(self, prefix, *args):
-        return super(newbytes, self).startswith(prefix, *args)
-
-    @no(unicode)
-    def endswith(self, prefix, *args):
-        return super(newbytes, self).endswith(prefix, *args)
-
-    @no(unicode)
-    def split(self, sep=None, maxsplit=-1):
-        # Py2 str.split() takes maxsplit as an optional parameter, not as a
-        # keyword argument as in Python 3 bytes.
-        parts = super(newbytes, self).split(sep, maxsplit)
-        return [newbytes(part) for part in parts]
-
-    def splitlines(self, keepends=False):
-        """
-        B.splitlines([keepends]) -> list of lines
-
-        Return a list of the lines in B, breaking at line boundaries.
-        Line breaks are not included in the resulting list unless keepends
-        is given and true.
-        """
-        # Py2 str.splitlines() takes keepends as an optional parameter,
-        # not as a keyword argument as in Python 3 bytes.
-        parts = super(newbytes, self).splitlines(keepends)
-        return [newbytes(part) for part in parts]
-
-    @no(unicode)
-    def rsplit(self, sep=None, maxsplit=-1):
-        # Py2 str.rsplit() takes maxsplit as an optional parameter, not as a
-        # keyword argument as in Python 3 bytes.
-        parts = super(newbytes, self).rsplit(sep, maxsplit)
-        return [newbytes(part) for part in parts]
-
-    @no(unicode)
-    def partition(self, sep):
-        parts = super(newbytes, self).partition(sep)
-        return tuple(newbytes(part) for part in parts)
-
-    @no(unicode)
-    def rpartition(self, sep):
-        parts = super(newbytes, self).rpartition(sep)
-        return tuple(newbytes(part) for part in parts)
-
-    @no(unicode, (1,))
-    def rindex(self, sub, *args):
-        '''
-        S.rindex(sub [,start [,end]]) -> int
-
-        Like S.rfind() but raise ValueError when the substring is not found.
-        '''
-        pos = self.rfind(sub, *args)
-        if pos == -1:
-            raise ValueError('substring not found')
-        return pos
-
-    @no(unicode)
-    def index(self, sub, *args):
-        '''
-        Returns the index of sub in bytes.
-        Raises ValueError if sub is not found, and TypeError if it can't
-        be converted to bytes or its length is not 1.
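-
-        For example (a sketch):
-
-        >>> newbytes(b'abc').index(b'b')
-        1
-        >>> newbytes(b'abc').index(98)    # 98 == ord(b'b')
-        1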
- ''' - if isinstance(sub, int): - if len(args) == 0: - start, end = 0, len(self) - elif len(args) == 1: - start = args[0] - elif len(args) == 2: - start, end = args - else: - raise TypeError('takes at most 3 arguments') - return list(self)[start:end].index(sub) - if not isinstance(sub, bytes): - try: - sub = self.__class__(sub) - except (TypeError, ValueError): - raise TypeError("can't convert sub to bytes") - try: - return super(newbytes, self).index(sub, *args) - except ValueError: - raise ValueError('substring not found') - - def __eq__(self, other): - if isinstance(other, (_builtin_bytes, bytearray)): - return super(newbytes, self).__eq__(other) - else: - return False - - def __ne__(self, other): - if isinstance(other, _builtin_bytes): - return super(newbytes, self).__ne__(other) - else: - return True - - unorderable_err = 'unorderable types: bytes() and {0}' - - def __lt__(self, other): - if isinstance(other, _builtin_bytes): - return super(newbytes, self).__lt__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __le__(self, other): - if isinstance(other, _builtin_bytes): - return super(newbytes, self).__le__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __gt__(self, other): - if isinstance(other, _builtin_bytes): - return super(newbytes, self).__gt__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __ge__(self, other): - if isinstance(other, _builtin_bytes): - return super(newbytes, self).__ge__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __native__(self): - # We can't just feed a newbytes object into str(), because - # newbytes.__str__() returns e.g. "b'blah'", consistent with Py3 bytes. - return super(newbytes, self).__str__() - - def __getattribute__(self, name): - """ - A trick to cause the ``hasattr`` builtin-fn to return False for - the 'encode' method on Py2. - """ - if name in ['encode', u'encode']: - raise AttributeError("encode method has been disabled in newbytes") - return super(newbytes, self).__getattribute__(name) - - @no(unicode) - def rstrip(self, bytes_to_strip=None): - """ - Strip trailing bytes contained in the argument. - If the argument is omitted, strip trailing ASCII whitespace. - """ - return newbytes(super(newbytes, self).rstrip(bytes_to_strip)) - - @no(unicode) - def strip(self, bytes_to_strip=None): - """ - Strip leading and trailing bytes contained in the argument. - If the argument is omitted, strip trailing ASCII whitespace. - """ - return newbytes(super(newbytes, self).strip(bytes_to_strip)) - - def lower(self): - """ - b.lower() -> copy of b - - Return a copy of b with all ASCII characters converted to lowercase. - """ - return newbytes(super(newbytes, self).lower()) - - @no(unicode) - def upper(self): - """ - b.upper() -> copy of b - - Return a copy of b with all ASCII characters converted to uppercase. - """ - return newbytes(super(newbytes, self).upper()) - - @classmethod - @no(unicode) - def maketrans(cls, frm, to): - """ - B.maketrans(frm, to) -> translation table - - Return a translation table (a bytes object of length 256) suitable - for use in the bytes or bytearray translate method where each byte - in frm is mapped to the byte at the same position in to. - The bytes objects frm and to must be of the same length. 
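-
-        A small usage sketch (note that ``translate`` itself is inherited
-        from the Py2 str type here):
-
-        >>> table = newbytes.maketrans(b'abc', b'xyz')
-        >>> newbytes(b'abcabc').translate(table)
-        'xyzxyz'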
- """ - return newbytes(string.maketrans(frm, to)) - - -__all__ = ['newbytes'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newdict.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newdict.py deleted file mode 100755 index 3f3a559..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newdict.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -A dict subclass for Python 2 that behaves like Python 3's dict - -Example use: - ->>> from builtins import dict ->>> d1 = dict() # instead of {} for an empty dict ->>> d2 = dict(key1='value1', key2='value2') - -The keys, values and items methods now return iterators on Python 2.x -(with set-like behaviour on Python 2.7). - ->>> for d in (d1, d2): -... assert not isinstance(d.keys(), list) -... assert not isinstance(d.values(), list) -... assert not isinstance(d.items(), list) -""" - -import sys - -from future.utils import with_metaclass -from future.types.newobject import newobject - - -_builtin_dict = dict -ver = sys.version_info[:2] - - -class BaseNewDict(type): - def __instancecheck__(cls, instance): - if cls == newdict: - return isinstance(instance, _builtin_dict) - else: - return issubclass(instance.__class__, cls) - - -class newdict(with_metaclass(BaseNewDict, _builtin_dict)): - """ - A backport of the Python 3 dict object to Py2 - """ - def items(self): - """ - On Python 2.7+: - D.items() -> a set-like object providing a view on D's items - On Python 2.6: - D.items() -> an iterator over D's items - """ - if ver == (2, 7): - return self.viewitems() - elif ver == (2, 6): - return self.iteritems() - elif ver >= (3, 0): - return self.items() - - def keys(self): - """ - On Python 2.7+: - D.keys() -> a set-like object providing a view on D's keys - On Python 2.6: - D.keys() -> an iterator over D's keys - """ - if ver == (2, 7): - return self.viewkeys() - elif ver == (2, 6): - return self.iterkeys() - elif ver >= (3, 0): - return self.keys() - - def values(self): - """ - On Python 2.7+: - D.values() -> a set-like object providing a view on D's values - On Python 2.6: - D.values() -> an iterator over D's values - """ - if ver == (2, 7): - return self.viewvalues() - elif ver == (2, 6): - return self.itervalues() - elif ver >= (3, 0): - return self.values() - - def __new__(cls, *args, **kwargs): - """ - dict() -> new empty dictionary - dict(mapping) -> new dictionary initialized from a mapping object's - (key, value) pairs - dict(iterable) -> new dictionary initialized as if via: - d = {} - for k, v in iterable: - d[k] = v - dict(**kwargs) -> new dictionary initialized with the name=value pairs - in the keyword argument list. For example: dict(one=1, two=2) - """ - - if len(args) == 0: - return super(newdict, cls).__new__(cls) - elif type(args[0]) == newdict: - value = args[0] - else: - value = args[0] - return super(newdict, cls).__new__(cls, value) - - def __native__(self): - """ - Hook for the future.utils.native() function - """ - return dict(self) - - -__all__ = ['newdict'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newint.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newint.py deleted file mode 100755 index 748dba9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newint.py +++ /dev/null @@ -1,381 +0,0 @@ -""" -Backport of Python 3's int, based on Py2's long. - -They are very similar. 
The most notable difference is: - -- representation: trailing L in Python 2 removed in Python 3 -""" -from __future__ import division - -import struct - -from future.types.newbytes import newbytes -from future.types.newobject import newobject -from future.utils import PY3, isint, istext, isbytes, with_metaclass, native - - -if PY3: - long = int - from collections.abc import Iterable -else: - from collections import Iterable - - -class BaseNewInt(type): - def __instancecheck__(cls, instance): - if cls == newint: - # Special case for Py2 short or long int - return isinstance(instance, (int, long)) - else: - return issubclass(instance.__class__, cls) - - -class newint(with_metaclass(BaseNewInt, long)): - """ - A backport of the Python 3 int object to Py2 - """ - def __new__(cls, x=0, base=10): - """ - From the Py3 int docstring: - - | int(x=0) -> integer - | int(x, base=10) -> integer - | - | Convert a number or string to an integer, or return 0 if no - | arguments are given. If x is a number, return x.__int__(). For - | floating point numbers, this truncates towards zero. - | - | If x is not a number or if base is given, then x must be a string, - | bytes, or bytearray instance representing an integer literal in the - | given base. The literal can be preceded by '+' or '-' and be - | surrounded by whitespace. The base defaults to 10. Valid bases are - | 0 and 2-36. Base 0 means to interpret the base from the string as an - | integer literal. - | >>> int('0b100', base=0) - | 4 - - """ - try: - val = x.__int__() - except AttributeError: - val = x - else: - if not isint(val): - raise TypeError('__int__ returned non-int ({0})'.format( - type(val))) - - if base != 10: - # Explicit base - if not (istext(val) or isbytes(val) or isinstance(val, bytearray)): - raise TypeError( - "int() can't convert non-string with explicit base") - try: - return super(newint, cls).__new__(cls, val, base) - except TypeError: - return super(newint, cls).__new__(cls, newbytes(val), base) - # After here, base is 10 - try: - return super(newint, cls).__new__(cls, val) - except TypeError: - # Py2 long doesn't handle bytearray input with an explicit base, so - # handle this here. 
- # Py3: int(bytearray(b'10'), 2) == 2 - # Py2: int(bytearray(b'10'), 2) == 2 raises TypeError - # Py2: long(bytearray(b'10'), 2) == 2 raises TypeError - try: - return super(newint, cls).__new__(cls, newbytes(val)) - except: - raise TypeError("newint argument must be a string or a number," - "not '{0}'".format(type(val))) - - def __repr__(self): - """ - Without the L suffix - """ - value = super(newint, self).__repr__() - assert value[-1] == 'L' - return value[:-1] - - def __add__(self, other): - value = super(newint, self).__add__(other) - if value is NotImplemented: - return long(self) + other - return newint(value) - - def __radd__(self, other): - value = super(newint, self).__radd__(other) - if value is NotImplemented: - return other + long(self) - return newint(value) - - def __sub__(self, other): - value = super(newint, self).__sub__(other) - if value is NotImplemented: - return long(self) - other - return newint(value) - - def __rsub__(self, other): - value = super(newint, self).__rsub__(other) - if value is NotImplemented: - return other - long(self) - return newint(value) - - def __mul__(self, other): - value = super(newint, self).__mul__(other) - if isint(value): - return newint(value) - elif value is NotImplemented: - return long(self) * other - return value - - def __rmul__(self, other): - value = super(newint, self).__rmul__(other) - if isint(value): - return newint(value) - elif value is NotImplemented: - return other * long(self) - return value - - def __div__(self, other): - # We override this rather than e.g. relying on object.__div__ or - # long.__div__ because we want to wrap the value in a newint() - # call if other is another int - value = long(self) / other - if isinstance(other, (int, long)): - return newint(value) - else: - return value - - def __rdiv__(self, other): - value = other / long(self) - if isinstance(other, (int, long)): - return newint(value) - else: - return value - - def __idiv__(self, other): - # long has no __idiv__ method. 
Use __itruediv__ and cast back to - # newint: - value = self.__itruediv__(other) - if isinstance(other, (int, long)): - return newint(value) - else: - return value - - def __truediv__(self, other): - value = super(newint, self).__truediv__(other) - if value is NotImplemented: - value = long(self) / other - return value - - def __rtruediv__(self, other): - return super(newint, self).__rtruediv__(other) - - def __itruediv__(self, other): - # long has no __itruediv__ method - mylong = long(self) - mylong /= other - return mylong - - def __floordiv__(self, other): - return newint(super(newint, self).__floordiv__(other)) - - def __rfloordiv__(self, other): - return newint(super(newint, self).__rfloordiv__(other)) - - def __ifloordiv__(self, other): - # long has no __ifloordiv__ method - mylong = long(self) - mylong //= other - return newint(mylong) - - def __mod__(self, other): - value = super(newint, self).__mod__(other) - if value is NotImplemented: - return long(self) % other - return newint(value) - - def __rmod__(self, other): - value = super(newint, self).__rmod__(other) - if value is NotImplemented: - return other % long(self) - return newint(value) - - def __divmod__(self, other): - value = super(newint, self).__divmod__(other) - if value is NotImplemented: - mylong = long(self) - return (mylong // other, mylong % other) - return (newint(value[0]), newint(value[1])) - - def __rdivmod__(self, other): - value = super(newint, self).__rdivmod__(other) - if value is NotImplemented: - mylong = long(self) - return (other // mylong, other % mylong) - return (newint(value[0]), newint(value[1])) - - def __pow__(self, other): - value = super(newint, self).__pow__(other) - if value is NotImplemented: - return long(self) ** other - return newint(value) - - def __rpow__(self, other): - value = super(newint, self).__rpow__(other) - if value is NotImplemented: - return other ** long(self) - return newint(value) - - def __lshift__(self, other): - if not isint(other): - raise TypeError( - "unsupported operand type(s) for <<: '%s' and '%s'" % - (type(self).__name__, type(other).__name__)) - return newint(super(newint, self).__lshift__(other)) - - def __rshift__(self, other): - if not isint(other): - raise TypeError( - "unsupported operand type(s) for >>: '%s' and '%s'" % - (type(self).__name__, type(other).__name__)) - return newint(super(newint, self).__rshift__(other)) - - def __and__(self, other): - if not isint(other): - raise TypeError( - "unsupported operand type(s) for &: '%s' and '%s'" % - (type(self).__name__, type(other).__name__)) - return newint(super(newint, self).__and__(other)) - - def __or__(self, other): - if not isint(other): - raise TypeError( - "unsupported operand type(s) for |: '%s' and '%s'" % - (type(self).__name__, type(other).__name__)) - return newint(super(newint, self).__or__(other)) - - def __xor__(self, other): - if not isint(other): - raise TypeError( - "unsupported operand type(s) for ^: '%s' and '%s'" % - (type(self).__name__, type(other).__name__)) - return newint(super(newint, self).__xor__(other)) - - def __neg__(self): - return newint(super(newint, self).__neg__()) - - def __pos__(self): - return newint(super(newint, self).__pos__()) - - def __abs__(self): - return newint(super(newint, self).__abs__()) - - def __invert__(self): - return newint(super(newint, self).__invert__()) - - def __int__(self): - return self - - def __nonzero__(self): - return self.__bool__() - - def __bool__(self): - """ - So subclasses can override this, Py3-style - """ - return super(newint, 
self).__nonzero__()
-
-    def __native__(self):
-        return long(self)
-
-    def to_bytes(self, length, byteorder='big', signed=False):
-        """
-        Return an array of bytes representing an integer.
-
-        The integer is represented using length bytes. An OverflowError is
-        raised if the integer is not representable with the given number of
-        bytes.
-
-        The byteorder argument determines the byte order used to represent the
-        integer. If byteorder is 'big', the most significant byte is at the
-        beginning of the byte array. If byteorder is 'little', the most
-        significant byte is at the end of the byte array. To request the native
-        byte order of the host system, use `sys.byteorder' as the byte order value.
-
-        The signed keyword-only argument determines whether two's complement is
-        used to represent the integer. If signed is False and a negative integer
-        is given, an OverflowError is raised.
-        """
-        if length < 0:
-            raise ValueError("length argument must be non-negative")
-        if length == 0 and self == 0:
-            return newbytes()
-        if signed and self < 0:
-            bits = length * 8
-            num = (2**bits) + self
-            if num <= 0:
-                raise OverflowError("int too small to convert")
-        else:
-            if self < 0:
-                raise OverflowError("can't convert negative int to unsigned")
-            num = self
-        if byteorder not in ('little', 'big'):
-            raise ValueError("byteorder must be either 'little' or 'big'")
-        h = b'%x' % num
-        s = newbytes((b'0'*(len(h) % 2) + h).zfill(length*2).decode('hex'))
-        if signed:
-            high_set = s[0] & 0x80
-            if self > 0 and high_set:
-                raise OverflowError("int too big to convert")
-            if self < 0 and not high_set:
-                raise OverflowError("int too small to convert")
-        if len(s) > length:
-            raise OverflowError("int too big to convert")
-        return s if byteorder == 'big' else s[::-1]
-
-    @classmethod
-    def from_bytes(cls, mybytes, byteorder='big', signed=False):
-        """
-        Return the integer represented by the given array of bytes.
-
-        The mybytes argument must either support the buffer protocol or be an
-        iterable object producing bytes. Bytes and bytearray are examples of
-        built-in objects that support the buffer protocol.
-
-        The byteorder argument determines the byte order used to represent the
-        integer. If byteorder is 'big', the most significant byte is at the
-        beginning of the byte array. If byteorder is 'little', the most
-        significant byte is at the end of the byte array. To request the native
-        byte order of the host system, use `sys.byteorder' as the byte order value.
-
-        The signed keyword-only argument indicates whether two's complement is
-        used to represent the integer.
-        """
-        if byteorder not in ('little', 'big'):
-            raise ValueError("byteorder must be either 'little' or 'big'")
-        if isinstance(mybytes, unicode):
-            raise TypeError("cannot convert unicode objects to bytes")
-        # mybytes can also be passed as a sequence of integers on Py3.
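to_bytes() above re-implements for Py2 what Python 3 exposes natively as int.to_bytes, with int.from_bytes (defined next) as its inverse. A quick Python 3 reference for the behaviour being ported:

```python
assert (1024).to_bytes(2, byteorder='big') == b'\x04\x00'
assert (1024).to_bytes(2, byteorder='little') == b'\x00\x04'

# signed=True selects the two's-complement encoding handled by the
# "signed and self < 0" branch above:
assert (-1).to_bytes(2, byteorder='big', signed=True) == b'\xff\xff'
assert int.from_bytes(b'\xff\xff', byteorder='big', signed=True) == -1

# Values that do not fit in the requested length raise OverflowError,
# matching the checks above:
try:
    (256).to_bytes(1, byteorder='big')
except OverflowError as exc:
    print(exc)  # int too big to convert
```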
- # Test for this: - elif isinstance(mybytes, Iterable): - mybytes = newbytes(mybytes) - b = mybytes if byteorder == 'big' else mybytes[::-1] - if len(b) == 0: - b = b'\x00' - # The encode() method has been disabled by newbytes, but Py2's - # str has it: - num = int(native(b).encode('hex'), 16) - if signed and (b[0] & 0x80): - num = num - (2 ** (len(b)*8)) - return cls(num) - - -# def _twos_comp(val, bits): -# """compute the 2's compliment of int value val""" -# if( (val&(1<<(bits-1))) != 0 ): -# val = val - (1<>> from builtins import list ->>> l1 = list() # instead of {} for an empty list ->>> l1.append('hello') ->>> l2 = l1.copy() - -""" - -import sys -import copy - -from future.utils import with_metaclass -from future.types.newobject import newobject - - -_builtin_list = list -ver = sys.version_info[:2] - - -class BaseNewList(type): - def __instancecheck__(cls, instance): - if cls == newlist: - return isinstance(instance, _builtin_list) - else: - return issubclass(instance.__class__, cls) - - -class newlist(with_metaclass(BaseNewList, _builtin_list)): - """ - A backport of the Python 3 list object to Py2 - """ - def copy(self): - """ - L.copy() -> list -- a shallow copy of L - """ - return copy.copy(self) - - def clear(self): - """L.clear() -> None -- remove all items from L""" - for i in range(len(self)): - self.pop() - - def __new__(cls, *args, **kwargs): - """ - list() -> new empty list - list(iterable) -> new list initialized from iterable's items - """ - - if len(args) == 0: - return super(newlist, cls).__new__(cls) - elif type(args[0]) == newlist: - value = args[0] - else: - value = args[0] - return super(newlist, cls).__new__(cls, value) - - def __add__(self, value): - return newlist(super(newlist, self).__add__(value)) - - def __radd__(self, left): - " left + self " - try: - return newlist(left) + self - except: - return NotImplemented - - def __getitem__(self, y): - """ - x.__getitem__(y) <==> x[y] - - Warning: a bug in Python 2.x prevents indexing via a slice from - returning a newlist object. - """ - if isinstance(y, slice): - return newlist(super(newlist, self).__getitem__(y)) - else: - return super(newlist, self).__getitem__(y) - - def __native__(self): - """ - Hook for the future.utils.native() function - """ - return list(self) - - def __nonzero__(self): - return len(self) > 0 - - -__all__ = ['newlist'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newmemoryview.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newmemoryview.py deleted file mode 100755 index 09f804d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newmemoryview.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -A pretty lame implementation of a memoryview object for Python 2.6. -""" -from numbers import Integral -import string - -from future.utils import istext, isbytes, PY2, with_metaclass -from future.types import no, issubset - -if PY2: - from collections import Iterable -else: - from collections.abc import Iterable - -# class BaseNewBytes(type): -# def __instancecheck__(cls, instance): -# return isinstance(instance, _builtin_bytes) - - -class newmemoryview(object): # with_metaclass(BaseNewBytes, _builtin_bytes)): - """ - A pretty lame backport of the Python 2.7 and Python 3.x - memoryviewview object to Py2.6. 
- """ - def __init__(self, obj): - return obj - - -__all__ = ['newmemoryview'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newobject.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newobject.py deleted file mode 100755 index 31b84fc..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newobject.py +++ /dev/null @@ -1,117 +0,0 @@ -""" -An object subclass for Python 2 that gives new-style classes written in the -style of Python 3 (with ``__next__`` and unicode-returning ``__str__`` methods) -the appropriate Python 2-style ``next`` and ``__unicode__`` methods for compatible. - -Example use:: - - from builtins import object - - my_unicode_str = u'Unicode string: \u5b54\u5b50' - - class A(object): - def __str__(self): - return my_unicode_str - - a = A() - print(str(a)) - - # On Python 2, these relations hold: - assert unicode(a) == my_unicode_string - assert str(a) == my_unicode_string.encode('utf-8') - - -Another example:: - - from builtins import object - - class Upper(object): - def __init__(self, iterable): - self._iter = iter(iterable) - def __next__(self): # note the Py3 interface - return next(self._iter).upper() - def __iter__(self): - return self - - assert list(Upper('hello')) == list('HELLO') - -""" - - -class newobject(object): - """ - A magical object class that provides Python 2 compatibility methods:: - next - __unicode__ - __nonzero__ - - Subclasses of this class can merely define the Python 3 methods (__next__, - __str__, and __bool__). - """ - def next(self): - if hasattr(self, '__next__'): - return type(self).__next__(self) - raise TypeError('newobject is not an iterator') - - def __unicode__(self): - # All subclasses of the builtin object should have __str__ defined. - # Note that old-style classes do not have __str__ defined. - if hasattr(self, '__str__'): - s = type(self).__str__(self) - else: - s = str(self) - if isinstance(s, unicode): - return s - else: - return s.decode('utf-8') - - def __nonzero__(self): - if hasattr(self, '__bool__'): - return type(self).__bool__(self) - if hasattr(self, '__len__'): - return type(self).__len__(self) - # object has no __nonzero__ method - return True - - # Are these ever needed? - # def __div__(self): - # return self.__truediv__() - - # def __idiv__(self, other): - # return self.__itruediv__(other) - - def __long__(self): - if not hasattr(self, '__int__'): - return NotImplemented - return self.__int__() # not type(self).__int__(self) - - # def __new__(cls, *args, **kwargs): - # """ - # dict() -> new empty dictionary - # dict(mapping) -> new dictionary initialized from a mapping object's - # (key, value) pairs - # dict(iterable) -> new dictionary initialized as if via: - # d = {} - # for k, v in iterable: - # d[k] = v - # dict(**kwargs) -> new dictionary initialized with the name=value pairs - # in the keyword argument list. 
For example: dict(one=1, two=2) - # """ - - # if len(args) == 0: - # return super(newdict, cls).__new__(cls) - # elif type(args[0]) == newdict: - # return args[0] - # else: - # value = args[0] - # return super(newdict, cls).__new__(cls, value) - - def __native__(self): - """ - Hook for the future.utils.native() function - """ - return object(self) - - __slots__ = [] - -__all__ = ['newobject'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newopen.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newopen.py deleted file mode 100755 index b75d45a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newopen.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -A substitute for the Python 3 open() function. - -Note that io.open() is more complete but maybe slower. Even so, the -completeness may be a better default. TODO: compare these -""" - -_builtin_open = open - -class newopen(object): - """Wrapper providing key part of Python 3 open() interface. - - From IPython's py3compat.py module. License: BSD. - """ - def __init__(self, fname, mode="r", encoding="utf-8"): - self.f = _builtin_open(fname, mode) - self.enc = encoding - - def write(self, s): - return self.f.write(s.encode(self.enc)) - - def read(self, size=-1): - return self.f.read(size).decode(self.enc) - - def close(self): - return self.f.close() - - def __enter__(self): - return self - - def __exit__(self, etype, value, traceback): - self.f.close() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newrange.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newrange.py deleted file mode 100755 index eda01a5..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newrange.py +++ /dev/null @@ -1,170 +0,0 @@ -""" -Nearly identical to xrange.py, by Dan Crosta, from - - https://github.com/dcrosta/xrange.git - -This is included here in the ``future`` package rather than pointed to as -a dependency because there is no package for ``xrange`` on PyPI. It is -also tweaked to appear like a regular Python 3 ``range`` object rather -than a Python 2 xrange. - -From Dan Crosta's README: - - "A pure-Python implementation of Python 2.7's xrange built-in, with - some features backported from the Python 3.x range built-in (which - replaced xrange) in that version." - - Read more at - https://late.am/post/2012/06/18/what-the-heck-is-an-xrange -""" -from __future__ import absolute_import - -from future.utils import PY2 - -if PY2: - from collections import Sequence, Iterator -else: - from collections.abc import Sequence, Iterator -from itertools import islice - -from future.backports.misc import count # with step parameter on Py2.6 -# For backward compatibility with python-future versions < 0.14.4: -_count = count - - -class newrange(Sequence): - """ - Pure-Python backport of Python 3's range object. 
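For orientation, this is the Python 3 range behaviour the class below has to reproduce (lazy length, O(1) membership, indexing, and slicing); a plain Python 3 snippet, not part of the removed file:

```python
r = range(0, 10, 3)
assert len(r) == 4                     # length is computed, never materialized
assert list(r) == [0, 3, 6, 9]
assert 6 in r and 7 not in r           # O(1) membership test via arithmetic
assert r[-1] == 9                      # negative indexing
assert list(r[::-1]) == [9, 6, 3, 0]   # slicing yields another range
assert r.index(6) == 2                 # solved with divmod, as in index() below
```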
See `the CPython - documentation for details: - `_ - """ - - def __init__(self, *args): - if len(args) == 1: - start, stop, step = 0, args[0], 1 - elif len(args) == 2: - start, stop, step = args[0], args[1], 1 - elif len(args) == 3: - start, stop, step = args - else: - raise TypeError('range() requires 1-3 int arguments') - - try: - start, stop, step = int(start), int(stop), int(step) - except ValueError: - raise TypeError('an integer is required') - - if step == 0: - raise ValueError('range() arg 3 must not be zero') - elif step < 0: - stop = min(stop, start) - else: - stop = max(stop, start) - - self._start = start - self._stop = stop - self._step = step - self._len = (stop - start) // step + bool((stop - start) % step) - - @property - def start(self): - return self._start - - @property - def stop(self): - return self._stop - - @property - def step(self): - return self._step - - def __repr__(self): - if self._step == 1: - return 'range(%d, %d)' % (self._start, self._stop) - return 'range(%d, %d, %d)' % (self._start, self._stop, self._step) - - def __eq__(self, other): - return (isinstance(other, newrange) and - (self._len == 0 == other._len or - (self._start, self._step, self._len) == - (other._start, other._step, self._len))) - - def __len__(self): - return self._len - - def index(self, value): - """Return the 0-based position of integer `value` in - the sequence this range represents.""" - try: - diff = value - self._start - except TypeError: - raise ValueError('%r is not in range' % value) - quotient, remainder = divmod(diff, self._step) - if remainder == 0 and 0 <= quotient < self._len: - return abs(quotient) - raise ValueError('%r is not in range' % value) - - def count(self, value): - """Return the number of ocurrences of integer `value` - in the sequence this range represents.""" - # a value can occur exactly zero or one times - return int(value in self) - - def __contains__(self, value): - """Return ``True`` if the integer `value` occurs in - the sequence this range represents.""" - try: - self.index(value) - return True - except ValueError: - return False - - def __reversed__(self): - return iter(self[::-1]) - - def __getitem__(self, index): - """Return the element at position ``index`` in the sequence - this range represents, or raise :class:`IndexError` if the - position is out of range.""" - if isinstance(index, slice): - return self.__getitem_slice(index) - if index < 0: - # negative indexes access from the end - index = self._len + index - if index < 0 or index >= self._len: - raise IndexError('range object index out of range') - return self._start + index * self._step - - def __getitem_slice(self, slce): - """Return a range which represents the requested slce - of the sequence represented by this range. - """ - scaled_indices = (self._step * n for n in slce.indices(self._len)) - start_offset, stop_offset, new_step = scaled_indices - return newrange(self._start + start_offset, - self._start + stop_offset, - new_step) - - def __iter__(self): - """Return an iterator which enumerates the elements of the - sequence this range represents.""" - return range_iterator(self) - - -class range_iterator(Iterator): - """An iterator for a :class:`range`. 
- """ - def __init__(self, range_): - self._stepper = islice(count(range_.start, range_.step), len(range_)) - - def __iter__(self): - return self - - def __next__(self): - return next(self._stepper) - - def next(self): - return next(self._stepper) - - -__all__ = ['newrange'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newstr.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newstr.py deleted file mode 100755 index 8ca191f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/types/newstr.py +++ /dev/null @@ -1,426 +0,0 @@ -""" -This module redefines ``str`` on Python 2.x to be a subclass of the Py2 -``unicode`` type that behaves like the Python 3.x ``str``. - -The main differences between ``newstr`` and Python 2.x's ``unicode`` type are -the stricter type-checking and absence of a `u''` prefix in the representation. - -It is designed to be used together with the ``unicode_literals`` import -as follows: - - >>> from __future__ import unicode_literals - >>> from builtins import str, isinstance - -On Python 3.x and normally on Python 2.x, these expressions hold - - >>> str('blah') is 'blah' - True - >>> isinstance('blah', str) - True - -However, on Python 2.x, with this import: - - >>> from __future__ import unicode_literals - -the same expressions are False: - - >>> str('blah') is 'blah' - False - >>> isinstance('blah', str) - False - -This module is designed to be imported together with ``unicode_literals`` on -Python 2 to bring the meaning of ``str`` back into alignment with unprefixed -string literals (i.e. ``unicode`` subclasses). - -Note that ``str()`` (and ``print()``) would then normally call the -``__unicode__`` method on objects in Python 2. To define string -representations of your objects portably across Py3 and Py2, use the -:func:`python_2_unicode_compatible` decorator in :mod:`future.utils`. - -""" - -from numbers import Number - -from future.utils import PY3, istext, with_metaclass, isnewbytes -from future.types import no, issubset -from future.types.newobject import newobject - - -if PY3: - # We'll probably never use newstr on Py3 anyway... - unicode = str - from collections.abc import Iterable -else: - from collections import Iterable - - -class BaseNewStr(type): - def __instancecheck__(cls, instance): - if cls == newstr: - return isinstance(instance, unicode) - else: - return issubclass(instance.__class__, cls) - - -class newstr(with_metaclass(BaseNewStr, unicode)): - """ - A backport of the Python 3 str object to Py2 - """ - no_convert_msg = "Can't convert '{0}' object to str implicitly" - - def __new__(cls, *args, **kwargs): - """ - From the Py3 str docstring: - - str(object='') -> str - str(bytes_or_buffer[, encoding[, errors]]) -> str - - Create a new string object from the given object. If encoding or - errors is specified, then the object must expose a data buffer - that will be decoded using the given encoding and error handler. - Otherwise, returns the result of object.__str__() (if defined) - or repr(object). - encoding defaults to sys.getdefaultencoding(). - errors defaults to 'strict'. - - """ - if len(args) == 0: - return super(newstr, cls).__new__(cls) - # Special case: If someone requests str(str(u'abc')), return the same - # object (same id) for consistency with Py3.3. This is not true for - # other objects like list or dict. 
- elif type(args[0]) == newstr and cls == newstr: - return args[0] - elif isinstance(args[0], unicode): - value = args[0] - elif isinstance(args[0], bytes): # i.e. Py2 bytes or newbytes - if 'encoding' in kwargs or len(args) > 1: - value = args[0].decode(*args[1:], **kwargs) - else: - value = args[0].__str__() - else: - value = args[0] - return super(newstr, cls).__new__(cls, value) - - def __repr__(self): - """ - Without the u prefix - """ - - value = super(newstr, self).__repr__() - # assert value[0] == u'u' - return value[1:] - - def __getitem__(self, y): - """ - Warning: Python <= 2.7.6 has a bug that causes this method never to be called - when y is a slice object. Therefore the type of newstr()[:2] is wrong - (unicode instead of newstr). - """ - return newstr(super(newstr, self).__getitem__(y)) - - def __contains__(self, key): - errmsg = "'in ' requires string as left operand, not {0}" - # Don't use isinstance() here because we only want to catch - # newstr, not Python 2 unicode: - if type(key) == newstr: - newkey = key - elif isinstance(key, unicode) or isinstance(key, bytes) and not isnewbytes(key): - newkey = newstr(key) - else: - raise TypeError(errmsg.format(type(key))) - return issubset(list(newkey), list(self)) - - @no('newbytes') - def __add__(self, other): - return newstr(super(newstr, self).__add__(other)) - - @no('newbytes') - def __radd__(self, left): - " left + self " - try: - return newstr(left) + self - except: - return NotImplemented - - def __mul__(self, other): - return newstr(super(newstr, self).__mul__(other)) - - def __rmul__(self, other): - return newstr(super(newstr, self).__rmul__(other)) - - def join(self, iterable): - errmsg = 'sequence item {0}: expected unicode string, found bytes' - for i, item in enumerate(iterable): - # Here we use type() rather than isinstance() because - # __instancecheck__ is being overridden. E.g. - # isinstance(b'abc', newbytes) is True on Py2. - if isnewbytes(item): - raise TypeError(errmsg.format(i)) - # Support use as a staticmethod: str.join('-', ['a', 'b']) - if type(self) == newstr: - return newstr(super(newstr, self).join(iterable)) - else: - return newstr(super(newstr, newstr(self)).join(iterable)) - - @no('newbytes') - def find(self, sub, *args): - return super(newstr, self).find(sub, *args) - - @no('newbytes') - def rfind(self, sub, *args): - return super(newstr, self).rfind(sub, *args) - - @no('newbytes', (1, 2)) - def replace(self, old, new, *args): - return newstr(super(newstr, self).replace(old, new, *args)) - - def decode(self, *args): - raise AttributeError("decode method has been disabled in newstr") - - def encode(self, encoding='utf-8', errors='strict'): - """ - Returns bytes - - Encode S using the codec registered for encoding. Default encoding - is 'utf-8'. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and - 'xmlcharrefreplace' as well as any other name registered with - codecs.register_error that can handle UnicodeEncodeErrors. - """ - from future.types.newbytes import newbytes - # Py2 unicode.encode() takes encoding and errors as optional parameter, - # not keyword arguments as in Python 3 str. - - # For the surrogateescape error handling mechanism, the - # codecs.register_error() function seems to be inadequate for an - # implementation of it when encoding. (Decoding seems fine, however.) 
- # For example, in the case of - # u'\udcc3'.encode('ascii', 'surrogateescape_handler') - # after registering the ``surrogateescape_handler`` function in - # future.utils.surrogateescape, both Python 2.x and 3.x raise an - # exception anyway after the function is called because the unicode - # string it has to return isn't encodable strictly as ASCII. - - if errors == 'surrogateescape': - if encoding == 'utf-16': - # Known to fail here. See test_encoding_works_normally() - raise NotImplementedError('FIXME: surrogateescape handling is ' - 'not yet implemented properly') - # Encode char by char, building up list of byte-strings - mybytes = [] - for c in self: - code = ord(c) - if 0xD800 <= code <= 0xDCFF: - mybytes.append(newbytes([code - 0xDC00])) - else: - mybytes.append(c.encode(encoding=encoding)) - return newbytes(b'').join(mybytes) - return newbytes(super(newstr, self).encode(encoding, errors)) - - @no('newbytes', 1) - def startswith(self, prefix, *args): - if isinstance(prefix, Iterable): - for thing in prefix: - if isnewbytes(thing): - raise TypeError(self.no_convert_msg.format(type(thing))) - return super(newstr, self).startswith(prefix, *args) - - @no('newbytes', 1) - def endswith(self, prefix, *args): - # Note we need the decorator above as well as the isnewbytes() - # check because prefix can be either a bytes object or e.g. a - # tuple of possible prefixes. (If it's a bytes object, each item - # in it is an int.) - if isinstance(prefix, Iterable): - for thing in prefix: - if isnewbytes(thing): - raise TypeError(self.no_convert_msg.format(type(thing))) - return super(newstr, self).endswith(prefix, *args) - - @no('newbytes', 1) - def split(self, sep=None, maxsplit=-1): - # Py2 unicode.split() takes maxsplit as an optional parameter, - # not as a keyword argument as in Python 3 str. - parts = super(newstr, self).split(sep, maxsplit) - return [newstr(part) for part in parts] - - @no('newbytes', 1) - def rsplit(self, sep=None, maxsplit=-1): - # Py2 unicode.rsplit() takes maxsplit as an optional parameter, - # not as a keyword argument as in Python 3 str. - parts = super(newstr, self).rsplit(sep, maxsplit) - return [newstr(part) for part in parts] - - @no('newbytes', 1) - def partition(self, sep): - parts = super(newstr, self).partition(sep) - return tuple(newstr(part) for part in parts) - - @no('newbytes', 1) - def rpartition(self, sep): - parts = super(newstr, self).rpartition(sep) - return tuple(newstr(part) for part in parts) - - @no('newbytes', 1) - def index(self, sub, *args): - """ - Like newstr.find() but raise ValueError when the substring is not - found. - """ - pos = self.find(sub, *args) - if pos == -1: - raise ValueError('substring not found') - return pos - - def splitlines(self, keepends=False): - """ - S.splitlines(keepends=False) -> list of strings - - Return a list of the lines in S, breaking at line boundaries. - Line breaks are not included in the resulting list unless keepends - is given and true. - """ - # Py2 unicode.splitlines() takes keepends as an optional parameter, - # not as a keyword argument as in Python 3 str. 
- parts = super(newstr, self).splitlines(keepends) - return [newstr(part) for part in parts] - - def __eq__(self, other): - if (isinstance(other, unicode) or - isinstance(other, bytes) and not isnewbytes(other)): - return super(newstr, self).__eq__(other) - else: - return NotImplemented - - def __hash__(self): - if (isinstance(self, unicode) or - isinstance(self, bytes) and not isnewbytes(self)): - return super(newstr, self).__hash__() - else: - raise NotImplementedError() - - def __ne__(self, other): - if (isinstance(other, unicode) or - isinstance(other, bytes) and not isnewbytes(other)): - return super(newstr, self).__ne__(other) - else: - return True - - unorderable_err = 'unorderable types: str() and {0}' - - def __lt__(self, other): - if (isinstance(other, unicode) or - isinstance(other, bytes) and not isnewbytes(other)): - return super(newstr, self).__lt__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __le__(self, other): - if (isinstance(other, unicode) or - isinstance(other, bytes) and not isnewbytes(other)): - return super(newstr, self).__le__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __gt__(self, other): - if (isinstance(other, unicode) or - isinstance(other, bytes) and not isnewbytes(other)): - return super(newstr, self).__gt__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __ge__(self, other): - if (isinstance(other, unicode) or - isinstance(other, bytes) and not isnewbytes(other)): - return super(newstr, self).__ge__(other) - raise TypeError(self.unorderable_err.format(type(other))) - - def __getattribute__(self, name): - """ - A trick to cause the ``hasattr`` builtin-fn to return False for - the 'decode' method on Py2. - """ - if name in ['decode', u'decode']: - raise AttributeError("decode method has been disabled in newstr") - return super(newstr, self).__getattribute__(name) - - def __native__(self): - """ - A hook for the future.utils.native() function. - """ - return unicode(self) - - @staticmethod - def maketrans(x, y=None, z=None): - """ - Return a translation table usable for str.translate(). - - If there is only one argument, it must be a dictionary mapping Unicode - ordinals (integers) or characters to Unicode ordinals, strings or None. - Character keys will be then converted to ordinals. - If there are two arguments, they must be strings of equal length, and - in the resulting dictionary, each character in x will be mapped to the - character at the same position in y. If there is a third argument, it - must be a string, whose characters will be mapped to None in the result. 
- """ - - if y is None: - assert z is None - if not isinstance(x, dict): - raise TypeError('if you give only one argument to maketrans it must be a dict') - result = {} - for (key, value) in x.items(): - if len(key) > 1: - raise ValueError('keys in translate table must be strings or integers') - result[ord(key)] = value - else: - if not isinstance(x, unicode) and isinstance(y, unicode): - raise TypeError('x and y must be unicode strings') - if not len(x) == len(y): - raise ValueError('the first two maketrans arguments must have equal length') - result = {} - for (xi, yi) in zip(x, y): - if len(xi) > 1: - raise ValueError('keys in translate table must be strings or integers') - result[ord(xi)] = ord(yi) - - if z is not None: - for char in z: - result[ord(char)] = None - return result - - def translate(self, table): - """ - S.translate(table) -> str - - Return a copy of the string S, where all characters have been mapped - through the given translation table, which must be a mapping of - Unicode ordinals to Unicode ordinals, strings, or None. - Unmapped characters are left untouched. Characters mapped to None - are deleted. - """ - l = [] - for c in self: - if ord(c) in table: - val = table[ord(c)] - if val is None: - continue - elif isinstance(val, unicode): - l.append(val) - else: - l.append(chr(val)) - else: - l.append(c) - return ''.join(l) - - def isprintable(self): - raise NotImplementedError('fixme') - - def isidentifier(self): - raise NotImplementedError('fixme') - - def format_map(self): - raise NotImplementedError('fixme') - - -__all__ = ['newstr'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/__init__.py deleted file mode 100755 index 46bd96d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/__init__.py +++ /dev/null @@ -1,767 +0,0 @@ -""" -A selection of cross-compatible functions for Python 2 and 3. - -This module exports useful functions for 2/3 compatible code: - - * bind_method: binds functions to classes - * ``native_str_to_bytes`` and ``bytes_to_native_str`` - * ``native_str``: always equal to the native platform string object (because - this may be shadowed by imports from future.builtins) - * lists: lrange(), lmap(), lzip(), lfilter() - * iterable method compatibility: - - iteritems, iterkeys, itervalues - - viewitems, viewkeys, viewvalues - - These use the original method if available, otherwise they use items, - keys, values. - - * types: - - * text_type: unicode in Python 2, str in Python 3 - * string_types: basestring in Python 2, str in Python 3 - * binary_type: str in Python 2, bytes in Python 3 - * integer_types: (int, long) in Python 2, int in Python 3 - * class_types: (type, types.ClassType) in Python 2, type in Python 3 - - * bchr(c): - Take an integer and make a 1-character byte string - * bord(c) - Take the result of indexing on a byte string and make an integer - * tobytes(s) - Take a text string, a byte string, or a sequence of characters taken - from a byte string, and make a byte string. 
- - * raise_from() - * raise_with_traceback() - -This module also defines these decorators: - - * ``python_2_unicode_compatible`` - * ``with_metaclass`` - * ``implements_iterator`` - -Some of the functions in this module come from the following sources: - - * Jinja2 (BSD licensed: see - https://github.com/mitsuhiko/jinja2/blob/master/LICENSE) - * Pandas compatibility module pandas.compat - * six.py by Benjamin Peterson - * Django -""" - -import types -import sys -import numbers -import functools -import copy -import inspect - - -PY3 = sys.version_info[0] >= 3 -PY34_PLUS = sys.version_info[0:2] >= (3, 4) -PY35_PLUS = sys.version_info[0:2] >= (3, 5) -PY36_PLUS = sys.version_info[0:2] >= (3, 6) -PY2 = sys.version_info[0] == 2 -PY26 = sys.version_info[0:2] == (2, 6) -PY27 = sys.version_info[0:2] == (2, 7) -PYPY = hasattr(sys, 'pypy_translation_info') - - -def python_2_unicode_compatible(cls): - """ - A decorator that defines __unicode__ and __str__ methods under Python - 2. Under Python 3, this decorator is a no-op. - - To support Python 2 and 3 with a single code base, define a __str__ - method returning unicode text and apply this decorator to the class, like - this:: - - >>> from future.utils import python_2_unicode_compatible - - >>> @python_2_unicode_compatible - ... class MyClass(object): - ... def __str__(self): - ... return u'Unicode string: \u5b54\u5b50' - - >>> a = MyClass() - - Then, after this import: - - >>> from future.builtins import str - - the following is ``True`` on both Python 3 and 2:: - - >>> str(a) == a.encode('utf-8').decode('utf-8') - True - - and, on a Unicode-enabled terminal with the right fonts, these both print the - Chinese characters for Confucius:: - - >>> print(a) - >>> print(str(a)) - - The implementation comes from django.utils.encoding. - """ - if not PY3: - cls.__unicode__ = cls.__str__ - cls.__str__ = lambda self: self.__unicode__().encode('utf-8') - return cls - - -def with_metaclass(meta, *bases): - """ - Function from jinja2/_compat.py. License: BSD. - - Use it like this:: - - class BaseForm(object): - pass - - class FormType(type): - pass - - class Form(with_metaclass(FormType, BaseForm)): - pass - - This requires a bit of explanation: the basic idea is to make a - dummy metaclass for one level of class instantiation that replaces - itself with the actual metaclass. Because of internal type checks - we also need to make sure that we downgrade the custom metaclass - for one level to something closer to type (that's why __call__ and - __init__ comes back from type etc.). - - This has the advantage over six.with_metaclass of not introducing - dummy classes into the final MRO. 
- """ - class metaclass(meta): - __call__ = type.__call__ - __init__ = type.__init__ - def __new__(cls, name, this_bases, d): - if this_bases is None: - return type.__new__(cls, name, (), d) - return meta(name, bases, d) - return metaclass('temporary_class', None, {}) - - -# Definitions from pandas.compat and six.py follow: -if PY3: - def bchr(s): - return bytes([s]) - def bstr(s): - if isinstance(s, str): - return bytes(s, 'latin-1') - else: - return bytes(s) - def bord(s): - return s - - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - -else: - # Python 2 - def bchr(s): - return chr(s) - def bstr(s): - return str(s) - def bord(s): - return ord(s) - - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - -### - -if PY3: - def tobytes(s): - if isinstance(s, bytes): - return s - else: - if isinstance(s, str): - return s.encode('latin-1') - else: - return bytes(s) -else: - # Python 2 - def tobytes(s): - if isinstance(s, unicode): - return s.encode('latin-1') - else: - return ''.join(s) - -tobytes.__doc__ = """ - Encodes to latin-1 (where the first 256 chars are the same as - ASCII.) - """ - -if PY3: - def native_str_to_bytes(s, encoding='utf-8'): - return s.encode(encoding) - - def bytes_to_native_str(b, encoding='utf-8'): - return b.decode(encoding) - - def text_to_native_str(t, encoding=None): - return t -else: - # Python 2 - def native_str_to_bytes(s, encoding=None): - from future.types import newbytes # to avoid a circular import - return newbytes(s) - - def bytes_to_native_str(b, encoding=None): - return native(b) - - def text_to_native_str(t, encoding='ascii'): - """ - Use this to create a Py2 native string when "from __future__ import - unicode_literals" is in effect. - """ - return unicode(t).encode(encoding) - -native_str_to_bytes.__doc__ = """ - On Py3, returns an encoded string. - On Py2, returns a newbytes type, ignoring the ``encoding`` argument. - """ - -if PY3: - # list-producing versions of the major Python iterating functions - def lrange(*args, **kwargs): - return list(range(*args, **kwargs)) - - def lzip(*args, **kwargs): - return list(zip(*args, **kwargs)) - - def lmap(*args, **kwargs): - return list(map(*args, **kwargs)) - - def lfilter(*args, **kwargs): - return list(filter(*args, **kwargs)) -else: - import __builtin__ - # Python 2-builtin ranges produce lists - lrange = __builtin__.range - lzip = __builtin__.zip - lmap = __builtin__.map - lfilter = __builtin__.filter - - -def isidentifier(s, dotted=False): - ''' - A function equivalent to the str.isidentifier method on Py3 - ''' - if dotted: - return all(isidentifier(a) for a in s.split('.')) - if PY3: - return s.isidentifier() - else: - import re - _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") - return bool(_name_re.match(s)) - - -def viewitems(obj, **kwargs): - """ - Function for iterating over dictionary items with the same set-like - behaviour on Py2.7 as on Py3. - - Passes kwargs to method.""" - func = getattr(obj, "viewitems", None) - if not func: - func = obj.items - return func(**kwargs) - - -def viewkeys(obj, **kwargs): - """ - Function for iterating over dictionary keys with the same set-like - behaviour on Py2.7 as on Py3. 
- - Passes kwargs to method.""" - func = getattr(obj, "viewkeys", None) - if not func: - func = obj.keys - return func(**kwargs) - - -def viewvalues(obj, **kwargs): - """ - Function for iterating over dictionary values with the same set-like - behaviour on Py2.7 as on Py3. - - Passes kwargs to method.""" - func = getattr(obj, "viewvalues", None) - if not func: - func = obj.values - return func(**kwargs) - - -def iteritems(obj, **kwargs): - """Use this only if compatibility with Python versions before 2.7 is - required. Otherwise, prefer viewitems(). - """ - func = getattr(obj, "iteritems", None) - if not func: - func = obj.items - return func(**kwargs) - - -def iterkeys(obj, **kwargs): - """Use this only if compatibility with Python versions before 2.7 is - required. Otherwise, prefer viewkeys(). - """ - func = getattr(obj, "iterkeys", None) - if not func: - func = obj.keys - return func(**kwargs) - - -def itervalues(obj, **kwargs): - """Use this only if compatibility with Python versions before 2.7 is - required. Otherwise, prefer viewvalues(). - """ - func = getattr(obj, "itervalues", None) - if not func: - func = obj.values - return func(**kwargs) - - -def bind_method(cls, name, func): - """Bind a method to class, python 2 and python 3 compatible. - - Parameters - ---------- - - cls : type - class to receive bound method - name : basestring - name of method on class instance - func : function - function to be bound as method - - Returns - ------- - None - """ - # only python 2 has an issue with bound/unbound methods - if not PY3: - setattr(cls, name, types.MethodType(func, None, cls)) - else: - setattr(cls, name, func) - - -def getexception(): - return sys.exc_info()[1] - - -def _get_caller_globals_and_locals(): - """ - Returns the globals and locals of the calling frame. - - Is there an alternative to frame hacking here? - """ - caller_frame = inspect.stack()[2] - myglobals = caller_frame[0].f_globals - mylocals = caller_frame[0].f_locals - return myglobals, mylocals - - -def _repr_strip(mystring): - """ - Returns the string without any initial or final quotes. - """ - r = repr(mystring) - if r.startswith("'") and r.endswith("'"): - return r[1:-1] - else: - return r - - -if PY3: - def raise_from(exc, cause): - """ - Equivalent to: - - raise EXCEPTION from CAUSE - - on Python 3. (See PEP 3134). - """ - myglobals, mylocals = _get_caller_globals_and_locals() - - # We pass the exception and cause along with other globals - # when we exec(): - myglobals = myglobals.copy() - myglobals['__python_future_raise_from_exc'] = exc - myglobals['__python_future_raise_from_cause'] = cause - execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause" - exec(execstr, myglobals, mylocals) - - def raise_(tp, value=None, tb=None): - """ - A function that matches the Python 2.x ``raise`` statement. This - allows re-raising exceptions with the cls value and traceback on - Python 2 and 3. - """ - if isinstance(tp, BaseException): - # If the first object is an instance, the type of the exception - # is the class of the instance, the instance itself is the value, - # and the second object must be None. - if value is not None: - raise TypeError("instance exception may not have a separate value") - exc = tp - elif isinstance(tp, type) and not issubclass(tp, BaseException): - # If the first object is a class, it becomes the type of the - # exception. 
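The Py3 branch builds raise_from() around exec() because the raise ... from ... syntax is a Python 3-only construct and could not appear literally in source that Py2 must also parse. What the generated statement does natively on Python 3, shown with a hypothetical load_config helper:

```python
# PEP 3134 exception chaining: "raise X from Y" sets X.__cause__ = Y.
def load_config(path):
    try:
        with open(path) as f:
            return f.read()
    except OSError as exc:
        raise RuntimeError("config unavailable") from exc   # sets __cause__

try:
    load_config("/no/such/file")
except RuntimeError as err:
    assert isinstance(err.__cause__, OSError)   # the original error is chained
    assert err.__suppress_context__             # "from" suppresses __context__
```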
- raise TypeError("class must derive from BaseException, not %s" % tp.__name__) - else: - # The second object is used to determine the exception value: If it - # is an instance of the class, the instance becomes the exception - # value. If the second object is a tuple, it is used as the argument - # list for the class constructor; if it is None, an empty argument - # list is used, and any other object is treated as a single argument - # to the constructor. The instance so created by calling the - # constructor is used as the exception value. - if isinstance(value, tp): - exc = value - elif isinstance(value, tuple): - exc = tp(*value) - elif value is None: - exc = tp() - else: - exc = tp(value) - - if exc.__traceback__ is not tb: - raise exc.with_traceback(tb) - raise exc - - def raise_with_traceback(exc, traceback=Ellipsis): - if traceback == Ellipsis: - _, _, traceback = sys.exc_info() - raise exc.with_traceback(traceback) - -else: - def raise_from(exc, cause): - """ - Equivalent to: - - raise EXCEPTION from CAUSE - - on Python 3. (See PEP 3134). - """ - # Is either arg an exception class (e.g. IndexError) rather than - # instance (e.g. IndexError('my message here')? If so, pass the - # name of the class undisturbed through to "raise ... from ...". - if isinstance(exc, type) and issubclass(exc, Exception): - e = exc() - # exc = exc.__name__ - # execstr = "e = " + _repr_strip(exc) + "()" - # myglobals, mylocals = _get_caller_globals_and_locals() - # exec(execstr, myglobals, mylocals) - else: - e = exc - e.__suppress_context__ = False - if isinstance(cause, type) and issubclass(cause, Exception): - e.__cause__ = cause() - e.__cause__.__traceback__ = sys.exc_info()[2] - e.__suppress_context__ = True - elif cause is None: - e.__cause__ = None - e.__suppress_context__ = True - elif isinstance(cause, BaseException): - e.__cause__ = cause - object.__setattr__(e.__cause__, '__traceback__', sys.exc_info()[2]) - e.__suppress_context__ = True - else: - raise TypeError("exception causes must derive from BaseException") - e.__context__ = sys.exc_info()[1] - raise e - - exec(''' -def raise_(tp, value=None, tb=None): - raise tp, value, tb - -def raise_with_traceback(exc, traceback=Ellipsis): - if traceback == Ellipsis: - _, _, traceback = sys.exc_info() - raise exc, None, traceback -'''.strip()) - - -raise_with_traceback.__doc__ = ( -"""Raise exception with existing traceback. -If traceback is not passed, uses sys.exc_info() to get traceback.""" -) - - -# Deprecated alias for backward compatibility with ``future`` versions < 0.11: -reraise = raise_ - - -def implements_iterator(cls): - ''' - From jinja2/_compat.py. License: BSD. - - Use as a decorator like this:: - - @implements_iterator - class UppercasingIterator(object): - def __init__(self, iterable): - self._iter = iter(iterable) - def __iter__(self): - return self - def __next__(self): - return next(self._iter).upper() - - ''' - if PY3: - return cls - else: - cls.next = cls.__next__ - del cls.__next__ - return cls - -if PY3: - get_next = lambda x: x.next -else: - get_next = lambda x: x.__next__ - - -def encode_filename(filename): - if PY3: - return filename - else: - if isinstance(filename, unicode): - return filename.encode('utf-8') - return filename - - -def is_new_style(cls): - """ - Python 2.7 has both new-style and old-style classes. Old-style classes can - be pesky in some circumstances, such as when using inheritance. Use this - function to test for whether a class is new-style. (Python 3 only has - new-style classes.) 
- """ - return hasattr(cls, '__class__') and ('__dict__' in dir(cls) - or hasattr(cls, '__slots__')) - -# The native platform string and bytes types. Useful because ``str`` and -# ``bytes`` are redefined on Py2 by ``from future.builtins import *``. -native_str = str -native_bytes = bytes - - -def istext(obj): - """ - Deprecated. Use:: - >>> isinstance(obj, str) - after this import: - >>> from future.builtins import str - """ - return isinstance(obj, type(u'')) - - -def isbytes(obj): - """ - Deprecated. Use:: - >>> isinstance(obj, bytes) - after this import: - >>> from future.builtins import bytes - """ - return isinstance(obj, type(b'')) - - -def isnewbytes(obj): - """ - Equivalent to the result of ``type(obj) == type(newbytes)`` - in other words, it is REALLY a newbytes instance, not a Py2 native str - object? - - Note that this does not cover subclasses of newbytes, and it is not - equivalent to ininstance(obj, newbytes) - """ - return type(obj).__name__ == 'newbytes' - - -def isint(obj): - """ - Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or - ``long``. - - Instead of using this function, you can use: - - >>> from future.builtins import int - >>> isinstance(obj, int) - - The following idiom is equivalent: - - >>> from numbers import Integral - >>> isinstance(obj, Integral) - """ - - return isinstance(obj, numbers.Integral) - - -def native(obj): - """ - On Py3, this is a no-op: native(obj) -> obj - - On Py2, returns the corresponding native Py2 types that are - superclasses for backported objects from Py3: - - >>> from builtins import str, bytes, int - - >>> native(str(u'ABC')) - u'ABC' - >>> type(native(str(u'ABC'))) - unicode - - >>> native(bytes(b'ABC')) - b'ABC' - >>> type(native(bytes(b'ABC'))) - bytes - - >>> native(int(10**20)) - 100000000000000000000L - >>> type(native(int(10**20))) - long - - Existing native types on Py2 will be returned unchanged: - - >>> type(native(u'ABC')) - unicode - """ - if hasattr(obj, '__native__'): - return obj.__native__() - else: - return obj - - -# Implementation of exec_ is from ``six``: -if PY3: - import builtins - exec_ = getattr(builtins, "exec") -else: - def exec_(code, globs=None, locs=None): - """Execute code in a namespace.""" - if globs is None: - frame = sys._getframe(1) - globs = frame.f_globals - if locs is None: - locs = frame.f_locals - del frame - elif locs is None: - locs = globs - exec("""exec code in globs, locs""") - - -# Defined here for backward compatibility: -def old_div(a, b): - """ - DEPRECATED: import ``old_div`` from ``past.utils`` instead. - - Equivalent to ``a / b`` on Python 2 without ``from __future__ import - division``. - - TODO: generalize this to other objects (like arrays etc.) - """ - if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral): - return a // b - else: - return a / b - - -def as_native_str(encoding='utf-8'): - ''' - A decorator to turn a function or method call that returns text, i.e. - unicode, into one that returns a native platform str. 
- - Use it as a decorator like this:: - - from __future__ import unicode_literals - - class MyClass(object): - @as_native_str(encoding='ascii') - def __repr__(self): - return next(self._iter).upper() - ''' - if PY3: - return lambda f: f - else: - def encoder(f): - @functools.wraps(f) - def wrapper(*args, **kwargs): - return f(*args, **kwargs).encode(encoding=encoding) - return wrapper - return encoder - -# listvalues and listitems definitions from Nick Coghlan's (withdrawn) -# PEP 496: -try: - dict.iteritems -except AttributeError: - # Python 3 - def listvalues(d): - return list(d.values()) - def listitems(d): - return list(d.items()) -else: - # Python 2 - def listvalues(d): - return d.values() - def listitems(d): - return d.items() - -if PY3: - def ensure_new_type(obj): - return obj -else: - def ensure_new_type(obj): - from future.types.newbytes import newbytes - from future.types.newstr import newstr - from future.types.newint import newint - from future.types.newdict import newdict - - native_type = type(native(obj)) - - # Upcast only if the type is already a native (non-future) type - if issubclass(native_type, type(obj)): - # Upcast - if native_type == str: # i.e. Py2 8-bit str - return newbytes(obj) - elif native_type == unicode: - return newstr(obj) - elif native_type == int: - return newint(obj) - elif native_type == long: - return newint(obj) - elif native_type == dict: - return newdict(obj) - else: - return obj - else: - # Already a new type - assert type(obj) in [newbytes, newstr] - return obj - - -__all__ = ['PY2', 'PY26', 'PY3', 'PYPY', - 'as_native_str', 'binary_type', 'bind_method', 'bord', 'bstr', - 'bytes_to_native_str', 'class_types', 'encode_filename', - 'ensure_new_type', 'exec_', 'get_next', 'getexception', - 'implements_iterator', 'integer_types', 'is_new_style', 'isbytes', - 'isidentifier', 'isint', 'isnewbytes', 'istext', 'iteritems', - 'iterkeys', 'itervalues', 'lfilter', 'listitems', 'listvalues', - 'lmap', 'lrange', 'lzip', 'native', 'native_bytes', 'native_str', - 'native_str_to_bytes', 'old_div', - 'python_2_unicode_compatible', 'raise_', - 'raise_with_traceback', 'reraise', 'string_types', - 'text_to_native_str', 'text_type', 'tobytes', 'viewitems', - 'viewkeys', 'viewvalues', 'with_metaclass' - ] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/surrogateescape.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/surrogateescape.py deleted file mode 100755 index 0dcc9fa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/utils/surrogateescape.py +++ /dev/null @@ -1,198 +0,0 @@ -""" -This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error -handler of Python 3. 
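Before the implementation, here is the behaviour being backported, demonstrated with Python 3's built-in handler: any undecodable byte is smuggled through as a lone surrogate in the U+DC80-U+DCFF range and restored losslessly on encoding.

```python
raw = b'caf\xe9'                                  # a latin-1 byte, invalid as UTF-8
text = raw.decode('utf-8', errors='surrogateescape')
assert text == 'caf\udce9'                        # 0xE9 becomes U+DC00 + 0xE9
assert text.encode('utf-8', errors='surrogateescape') == raw   # lossless round trip
```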
- -Source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/misc -""" - -# This code is released under the Python license and the BSD 2-clause license - -import codecs -import sys - -from future import utils - - -FS_ERRORS = 'surrogateescape' - -# # -- Python 2/3 compatibility ------------------------------------- -# FS_ERRORS = 'my_surrogateescape' - -def u(text): - if utils.PY3: - return text - else: - return text.decode('unicode_escape') - -def b(data): - if utils.PY3: - return data.encode('latin1') - else: - return data - -if utils.PY3: - _unichr = chr - bytes_chr = lambda code: bytes((code,)) -else: - _unichr = unichr - bytes_chr = chr - -def surrogateescape_handler(exc): - """ - Pure Python implementation of the PEP 383: the "surrogateescape" error - handler of Python 3. Undecodable bytes will be replaced by a Unicode - character U+DCxx on decoding, and these are translated into the - original bytes on encoding. - """ - mystring = exc.object[exc.start:exc.end] - - try: - if isinstance(exc, UnicodeDecodeError): - # mystring is a byte-string in this case - decoded = replace_surrogate_decode(mystring) - elif isinstance(exc, UnicodeEncodeError): - # In the case of u'\udcc3'.encode('ascii', - # 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an - # exception anyway after this function is called, even though I think - # it's doing what it should. It seems that the strict encoder is called - # to encode the unicode string that this function returns ... - decoded = replace_surrogate_encode(mystring) - else: - raise exc - except NotASurrogateError: - raise exc - return (decoded, exc.end) - - -class NotASurrogateError(Exception): - pass - - -def replace_surrogate_encode(mystring): - """ - Returns a (unicode) string, not the more logical bytes, because the codecs - register_error functionality expects this. - """ - decoded = [] - for ch in mystring: - # if utils.PY3: - # code = ch - # else: - code = ord(ch) - - # The following magic comes from Py3.3's Python/codecs.c file: - if not 0xD800 <= code <= 0xDCFF: - # Not a surrogate. Fail with the original exception. - raise NotASurrogateError - # mybytes = [0xe0 | (code >> 12), - # 0x80 | ((code >> 6) & 0x3f), - # 0x80 | (code & 0x3f)] - # Is this a good idea? - if 0xDC00 <= code <= 0xDC7F: - decoded.append(_unichr(code - 0xDC00)) - elif code <= 0xDCFF: - decoded.append(_unichr(code - 0xDC00)) - else: - raise NotASurrogateError - return str().join(decoded) - - -def replace_surrogate_decode(mybytes): - """ - Returns a (unicode) string - """ - decoded = [] - for ch in mybytes: - # We may be parsing newbytes (in which case ch is an int) or a native - # str on Py2 - if isinstance(ch, int): - code = ch - else: - code = ord(ch) - if 0x80 <= code <= 0xFF: - decoded.append(_unichr(0xDC00 + code)) - elif code <= 0x7F: - decoded.append(_unichr(code)) - else: - # # It may be a bad byte - # # Try swallowing it. - # continue - # print("RAISE!") - raise NotASurrogateError - return str().join(decoded) - - -def encodefilename(fn): - if FS_ENCODING == 'ascii': - # ASCII encoder of Python 2 expects that the error handler returns a - # Unicode string encodable to ASCII, whereas our surrogateescape error - # handler has to return bytes in 0x80-0xFF range. 
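surrogateescape_handler() plugs into Python's codec machinery through codecs.register_error (see register_surrogateescape() further down). A toy illustration of that registration protocol, with a hypothetical handler that substitutes '?' for undecodable bytes:

```python
import codecs

# An error handler receives the UnicodeError and returns a
# (replacement, resume_position) pair; register_error() files it under
# a name that can then be passed as errors=... to decode/encode calls.
def question_mark_handler(exc):          # hypothetical toy handler
    if isinstance(exc, UnicodeDecodeError):
        return (u'?', exc.end)
    raise exc

codecs.register_error('question-mark', question_mark_handler)
assert b'caf\xe9'.decode('ascii', errors='question-mark') == 'caf?'
```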
- encoded = [] - for index, ch in enumerate(fn): - code = ord(ch) - if code < 128: - ch = bytes_chr(code) - elif 0xDC80 <= code <= 0xDCFF: - ch = bytes_chr(code - 0xDC00) - else: - raise UnicodeEncodeError(FS_ENCODING, - fn, index, index+1, - 'ordinal not in range(128)') - encoded.append(ch) - return bytes().join(encoded) - elif FS_ENCODING == 'utf-8': - # UTF-8 encoder of Python 2 encodes surrogates, so U+DC80-U+DCFF - # doesn't go through our error handler - encoded = [] - for index, ch in enumerate(fn): - code = ord(ch) - if 0xD800 <= code <= 0xDFFF: - if 0xDC80 <= code <= 0xDCFF: - ch = bytes_chr(code - 0xDC00) - encoded.append(ch) - else: - raise UnicodeEncodeError( - FS_ENCODING, - fn, index, index+1, 'surrogates not allowed') - else: - ch_utf8 = ch.encode('utf-8') - encoded.append(ch_utf8) - return bytes().join(encoded) - else: - return fn.encode(FS_ENCODING, FS_ERRORS) - -def decodefilename(fn): - return fn.decode(FS_ENCODING, FS_ERRORS) - -FS_ENCODING = 'ascii'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]') -# FS_ENCODING = 'cp932'; fn = b('[abc\x81\x00]'); encoded = u('[abc\udc81\x00]') -# FS_ENCODING = 'UTF-8'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]') - - -# normalize the filesystem encoding name. -# For example, we expect "utf-8", not "UTF8". -FS_ENCODING = codecs.lookup(FS_ENCODING).name - - -def register_surrogateescape(): - """ - Registers the surrogateescape error handler on Python 2 (only) - """ - if utils.PY3: - return - try: - codecs.lookup_error(FS_ERRORS) - except LookupError: - codecs.register_error(FS_ERRORS, surrogateescape_handler) - - -if __name__ == '__main__': - pass - # # Tests: - # register_surrogateescape() - - # b = decodefilename(fn) - # assert b == encoded, "%r != %r" % (b, encoded) - # c = encodefilename(b) - # assert c == fn, '%r != %r' % (c, fn) - # # print("ok") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/LICENSE deleted file mode 100644 index ae38286..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Httplib2 Software License - -Copyright (c) 2006 by Joe Gregorio - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/METADATA deleted file mode 100644 index 276c443..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/METADATA +++ /dev/null @@ -1,71 +0,0 @@ -Metadata-Version: 2.1 -Name: httplib2 -Version: 0.19.1 -Summary: A comprehensive HTTP client library. -Home-page: https://github.com/httplib2/httplib2 -Author: Joe Gregorio -Author-email: joe@bitworking.org -License: MIT -Platform: UNKNOWN -Classifier: Development Status :: 4 - Beta -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Topic :: Internet :: WWW/HTTP -Classifier: Topic :: Software Development :: Libraries -Requires-Dist: pyparsing (<3,>=2.4.2) - - - -A comprehensive HTTP client library, ``httplib2`` supports many features left out of other HTTP libraries. - -**HTTP and HTTPS** - HTTPS support is only available if the socket module was compiled with SSL support. - - -**Keep-Alive** - Supports HTTP 1.1 Keep-Alive, keeping the socket open and performing multiple requests over the same connection if possible. - - -**Authentication** - The following three types of HTTP Authentication are supported. These can be used over both HTTP and HTTPS. - - * Digest - * Basic - * WSSE - -**Caching** - The module can optionally operate with a private cache that understands the Cache-Control: - header and uses both the ETag and Last-Modified cache validators. Both file system - and memcached based caches are supported. - - -**All Methods** - The module can handle any HTTP request method, not just GET and POST. - - -**Redirects** - Automatically follows 3XX redirects on GETs. - - -**Compression** - Handles both 'deflate' and 'gzip' types of compression. - - -**Lost update support** - Automatically adds back ETags into PUT requests to resources we have already cached. This implements Section 3.2 of Detecting the Lost Update Problem Using Unreserved Checkout - - -**Unit Tested** - A large and growing set of unit tests. 
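The feature list above maps onto a small public API; a minimal usage sketch of the vendored library (hypothetical URL, and assuming ".cache" is a writable directory backing the disk-based FileCache):

    import httplib2

    h = httplib2.Http(".cache")
    resp, content = h.request("http://example.org/", "GET")
    # A repeated safe request may be revalidated via ETag/Last-Modified and
    # answered from the cache; Response.fromcache records whether it was.
    resp, content = h.request("http://example.org/", "GET")
    print(resp.status, resp.fromcache)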
- - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/RECORD deleted file mode 100644 index dd4f614..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/RECORD +++ /dev/null @@ -1,13 +0,0 @@ -httplib2-0.19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -httplib2-0.19.1.dist-info/LICENSE,sha256=WJ7sOPct8r4gNxHTuMvs6bkIxef_ALw8q39juunjZrQ,1086 -httplib2-0.19.1.dist-info/METADATA,sha256=Y773x9o8W64zxHwc9LyJIyAzCAQsnyvWDyrUs7l1l50,2235 -httplib2-0.19.1.dist-info/RECORD,, -httplib2-0.19.1.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 -httplib2-0.19.1.dist-info/top_level.txt,sha256=BEY8ChKwagUWmu9x8yN9JObJpZKNeWCr1E-sIECb56I,9 -httplib2/__init__.py,sha256=BIXewNb18bHReQm1J-NWasURlkO0UjjEgup2UadKP88,68412 -httplib2/auth.py,sha256=IdJCKqMC2nx7O5wbYwfO04m1X3miYW5JAZ9Wn5eQZi4,2026 -httplib2/cacerts.txt,sha256=AQyadVjp1sEIG0yIiMJ82l52hplPo3odJIyTSS_sONw,135547 -httplib2/certs.py,sha256=guhfjMNhDdKJEyYBb5ZyLxVO5q1I7Y_P-4BG8MniBk8,971 -httplib2/error.py,sha256=GyqPUvZeKdVLq0f3xg0uX4rjtv7jVGJuPerAdyc-jfk,954 -httplib2/iri2uri.py,sha256=PhIzEzeR6C73l7piwrNAJlVvlWgsqxtJTlFeXgznzQo,4153 -httplib2/socks.py,sha256=oaeEOnT2rkTNm6wnn0CSdhWzVaVshnnkAKiP4kxKzzc,19701 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/top_level.txt deleted file mode 100644 index fb881ec..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -httplib2 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/__init__.py deleted file mode 100755 index 8b240db..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/__init__.py +++ /dev/null @@ -1,1783 +0,0 @@ -# -*- coding: utf-8 -*- -"""Small, fast HTTP client library for Python.""" - -__author__ = "Joe Gregorio (joe@bitworking.org)" -__copyright__ = "Copyright 2006, Joe Gregorio" -__contributors__ = [ - "Thomas Broyer (t.broyer@ltgt.net)", - "James Antill", - "Xavier Verges Farrero", - "Jonathan Feinberg", - "Blair Zajac", - "Sam Ruby", - "Louis Nyffenegger", - "Mark Pilgrim", - "Alex Yu", -] -__license__ = "MIT" -__version__ = "0.19.1" - -import base64 -import calendar -import copy -import email -import email.feedparser -from email import header -import email.message -import email.utils -import errno -from gettext import gettext as _ -import gzip -from hashlib import md5 as _md5 -from hashlib import sha1 as _sha -import hmac -import http.client -import io -import os -import random -import re -import socket -import ssl -import sys -import time -import urllib.parse -import zlib - -try: - import socks -except ImportError: - # TODO: remove this fallback and copypasted socksipy module upon py2/3 merge, - # idea is to have soft-dependency on any compatible module called socks - from . import socks -from . 
import auth -from .error import * -from .iri2uri import iri2uri - - -def has_timeout(timeout): - if hasattr(socket, "_GLOBAL_DEFAULT_TIMEOUT"): - return timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT - return timeout is not None - - -__all__ = [ - "debuglevel", - "FailedToDecompressContent", - "Http", - "HttpLib2Error", - "ProxyInfo", - "RedirectLimit", - "RedirectMissingLocation", - "Response", - "RETRIES", - "UnimplementedDigestAuthOptionError", - "UnimplementedHmacDigestAuthOptionError", -] - -# The httplib debug level, set to a non-zero value to get debug output -debuglevel = 0 - -# A request will be tried 'RETRIES' times if it fails at the socket/connection level. -RETRIES = 2 - - -# Open Items: -# ----------- - -# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) - -# Pluggable cache storage (supports storing the cache in -# flat files by default. We need a plug-in architecture -# that can support Berkeley DB and Squid) - -# == Known Issues == -# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. -# Does not handle Cache-Control: max-stale -# Does not use Age: headers when calculating cache freshness. - -# The number of redirections to follow before giving up. -# Note that only GET redirects are automatically followed. -# Will also honor 301 requests by saving that info and never -# requesting that URI again. -DEFAULT_MAX_REDIRECTS = 5 - -# Which headers are hop-by-hop headers by default -HOP_BY_HOP = [ - "connection", - "keep-alive", - "proxy-authenticate", - "proxy-authorization", - "te", - "trailers", - "transfer-encoding", - "upgrade", -] - -# https://tools.ietf.org/html/rfc7231#section-8.1.3 -SAFE_METHODS = ("GET", "HEAD", "OPTIONS", "TRACE") - -# To change, assign to `Http().redirect_codes` -REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308)) - - -from httplib2 import certs - -CA_CERTS = certs.where() - -# PROTOCOL_TLS is python 3.5.3+. PROTOCOL_SSLv23 is deprecated. -# Both PROTOCOL_TLS and PROTOCOL_SSLv23 are equivalent and means: -# > Selects the highest protocol version that both the client and server support. -# > Despite the name, this option can select “TLS” protocols as well as “SSL”. -# source: https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLS -DEFAULT_TLS_VERSION = getattr(ssl, "PROTOCOL_TLS", None) or getattr(ssl, "PROTOCOL_SSLv23") - - -def _build_ssl_context( - disable_ssl_certificate_validation, - ca_certs, - cert_file=None, - key_file=None, - maximum_version=None, - minimum_version=None, - key_password=None, -): - if not hasattr(ssl, "SSLContext"): - raise RuntimeError("httplib2 requires Python 3.2+ for ssl.SSLContext") - - context = ssl.SSLContext(DEFAULT_TLS_VERSION) - context.verify_mode = ssl.CERT_NONE if disable_ssl_certificate_validation else ssl.CERT_REQUIRED - - # SSLContext.maximum_version and SSLContext.minimum_version are python 3.7+. 
- # source: https://docs.python.org/3/library/ssl.html#ssl.SSLContext.maximum_version - if maximum_version is not None: - if hasattr(context, "maximum_version"): - context.maximum_version = getattr(ssl.TLSVersion, maximum_version) - else: - raise RuntimeError("setting tls_maximum_version requires Python 3.7 and OpenSSL 1.1 or newer") - if minimum_version is not None: - if hasattr(context, "minimum_version"): - context.minimum_version = getattr(ssl.TLSVersion, minimum_version) - else: - raise RuntimeError("setting tls_minimum_version requires Python 3.7 and OpenSSL 1.1 or newer") - - # check_hostname requires python 3.4+ - # we will perform the equivalent in HTTPSConnectionWithTimeout.connect() by calling ssl.match_hostname - # if check_hostname is not supported. - if hasattr(context, "check_hostname"): - context.check_hostname = not disable_ssl_certificate_validation - - context.load_verify_locations(ca_certs) - - if cert_file: - context.load_cert_chain(cert_file, key_file, key_password) - - return context - - -def _get_end2end_headers(response): - hopbyhop = list(HOP_BY_HOP) - hopbyhop.extend([x.strip() for x in response.get("connection", "").split(",")]) - return [header for header in list(response.keys()) if header not in hopbyhop] - - -URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") - - -def parse_uri(uri): - """Parses a URI using the regex given in Appendix B of RFC 3986. - - (scheme, authority, path, query, fragment) = parse_uri(uri) - """ - groups = URI.match(uri).groups() - return (groups[1], groups[3], groups[4], groups[6], groups[8]) - - -def urlnorm(uri): - (scheme, authority, path, query, fragment) = parse_uri(uri) - if not scheme or not authority: - raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) - authority = authority.lower() - scheme = scheme.lower() - if not path: - path = "/" - # Could do syntax based normalization of the URI before - # computing the digest. See Section 6.2.2 of Std 66. - request_uri = query and "?".join([path, query]) or path - scheme = scheme.lower() - defrag_uri = scheme + "://" + authority + request_uri - return scheme, authority, request_uri, defrag_uri - - -# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) -re_url_scheme = re.compile(r"^\w+://") -re_unsafe = re.compile(r"[^\w\-_.()=!]+", re.ASCII) - - -def safename(filename): - """Return a filename suitable for the cache. - Strips dangerous and common characters to create a filename we - can use to store the cache in. - """ - if isinstance(filename, bytes): - filename_bytes = filename - filename = filename.decode("utf-8") - else: - filename_bytes = filename.encode("utf-8") - filemd5 = _md5(filename_bytes).hexdigest() - filename = re_url_scheme.sub("", filename) - filename = re_unsafe.sub("", filename) - - # limit length of filename (vital for Windows) - # https://github.com/httplib2/httplib2/pull/74 - # C:\Users\ \AppData\Local\Temp\ , - # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars - # Thus max safe filename x = 93 chars. 
Let it be 90 to make a round sum: - filename = filename[:90] - - return ",".join((filename, filemd5)) - - -NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+") - - -def _normalize_headers(headers): - return dict( - [ - (_convert_byte_str(key).lower(), NORMALIZE_SPACE.sub(" ", _convert_byte_str(value)).strip(),) - for (key, value) in headers.items() - ] - ) - - -def _convert_byte_str(s): - if not isinstance(s, str): - return str(s, "utf-8") - return s - - -def _parse_cache_control(headers): - retval = {} - if "cache-control" in headers: - parts = headers["cache-control"].split(",") - parts_with_args = [ - tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=") - ] - parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")] - retval = dict(parts_with_args + parts_wo_args) - return retval - - -# Whether to use a strict mode to parse WWW-Authenticate headers -# Might lead to bad results in case of ill-formed header value, -# so disabled by default, falling back to relaxed parsing. -# Set to true to turn on, useful for testing servers. -USE_WWW_AUTH_STRICT_PARSING = 0 - - -def _entry_disposition(response_headers, request_headers): - """Determine freshness from the Date, Expires and Cache-Control headers. - - We don't handle the following: - - 1. Cache-Control: max-stale - 2. Age: headers are not used in the calculations. - - Note that this algorithm is simpler than you might think - because we are operating as a private (non-shared) cache. - This lets us ignore 's-maxage'. We can also ignore - 'proxy-invalidate' since we aren't a proxy. - We will never return a stale document as - fresh as a design decision, and thus the non-implementation - of 'max-stale'. This also lets us safely ignore 'must-revalidate' - since we operate as if every server has sent 'must-revalidate'. - Since we are private we get to ignore both 'public' and - 'private' parameters. We also ignore 'no-transform' since - we don't do any transformations. - The 'no-store' parameter is handled at a higher level. 
- So the only Cache-Control parameters we look at are: - - no-cache - only-if-cached - max-age - min-fresh - """ - - retval = "STALE" - cc = _parse_cache_control(request_headers) - cc_response = _parse_cache_control(response_headers) - - if "pragma" in request_headers and request_headers["pragma"].lower().find("no-cache") != -1: - retval = "TRANSPARENT" - if "cache-control" not in request_headers: - request_headers["cache-control"] = "no-cache" - elif "no-cache" in cc: - retval = "TRANSPARENT" - elif "no-cache" in cc_response: - retval = "STALE" - elif "only-if-cached" in cc: - retval = "FRESH" - elif "date" in response_headers: - date = calendar.timegm(email.utils.parsedate_tz(response_headers["date"])) - now = time.time() - current_age = max(0, now - date) - if "max-age" in cc_response: - try: - freshness_lifetime = int(cc_response["max-age"]) - except ValueError: - freshness_lifetime = 0 - elif "expires" in response_headers: - expires = email.utils.parsedate_tz(response_headers["expires"]) - if None == expires: - freshness_lifetime = 0 - else: - freshness_lifetime = max(0, calendar.timegm(expires) - date) - else: - freshness_lifetime = 0 - if "max-age" in cc: - try: - freshness_lifetime = int(cc["max-age"]) - except ValueError: - freshness_lifetime = 0 - if "min-fresh" in cc: - try: - min_fresh = int(cc["min-fresh"]) - except ValueError: - min_fresh = 0 - current_age += min_fresh - if freshness_lifetime > current_age: - retval = "FRESH" - return retval - - -def _decompressContent(response, new_content): - content = new_content - try: - encoding = response.get("content-encoding", None) - if encoding in ["gzip", "deflate"]: - if encoding == "gzip": - content = gzip.GzipFile(fileobj=io.BytesIO(new_content)).read() - if encoding == "deflate": - content = zlib.decompress(content, -zlib.MAX_WBITS) - response["content-length"] = str(len(content)) - # Record the historical presence of the encoding in a way the won't interfere. - response["-content-encoding"] = response["content-encoding"] - del response["content-encoding"] - except (IOError, zlib.error): - content = "" - raise FailedToDecompressContent( - _("Content purported to be compressed with %s but failed to decompress.") % response.get("content-encoding"), - response, - content, - ) - return content - - -def _bind_write_headers(msg): - def _write_headers(self): - # Self refers to the Generator object. - for h, v in msg.items(): - print("%s:" % h, end=" ", file=self._fp) - if isinstance(v, header.Header): - print(v.encode(maxlinelen=self._maxheaderlen), file=self._fp) - else: - # email.Header got lots of smarts, so use it. - headers = header.Header(v, maxlinelen=self._maxheaderlen, charset="utf-8", header_name=h) - print(headers.encode(), file=self._fp) - # A blank line always separates headers from body. - print(file=self._fp) - - return _write_headers - - -def _updateCache(request_headers, response_headers, content, cache, cachekey): - if cachekey: - cc = _parse_cache_control(request_headers) - cc_response = _parse_cache_control(response_headers) - if "no-store" in cc or "no-store" in cc_response: - cache.delete(cachekey) - else: - info = email.message.Message() - for key, value in response_headers.items(): - if key not in ["status", "content-encoding", "transfer-encoding"]: - info[key] = value - - # Add annotations to the cache to indicate what headers - # are variant for this request. 
- vary = response_headers.get("vary", None) - if vary: - vary_headers = vary.lower().replace(" ", "").split(",") - for header in vary_headers: - key = "-varied-%s" % header - try: - info[key] = request_headers[header] - except KeyError: - pass - - status = response_headers.status - if status == 304: - status = 200 - - status_header = "status: %d\r\n" % status - - try: - header_str = info.as_string() - except UnicodeEncodeError: - setattr(info, "_write_headers", _bind_write_headers(info)) - header_str = info.as_string() - - header_str = re.sub("\r(?!\n)|(? 0: - service = "cl" - # No point in guessing Base or Spreadsheet - # elif request_uri.find("spreadsheets") > 0: - # service = "wise" - - auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers["user-agent"],) - resp, content = self.http.request( - "https://www.google.com/accounts/ClientLogin", - method="POST", - body=urlencode(auth), - headers={"Content-Type": "application/x-www-form-urlencoded"}, - ) - lines = content.split("\n") - d = dict([tuple(line.split("=", 1)) for line in lines if line]) - if resp.status == 403: - self.Auth = "" - else: - self.Auth = d["Auth"] - - def request(self, method, request_uri, headers, content): - """Modify the request headers to add the appropriate - Authorization header.""" - headers["authorization"] = "GoogleLogin Auth=" + self.Auth - - -AUTH_SCHEME_CLASSES = { - "basic": BasicAuthentication, - "wsse": WsseAuthentication, - "digest": DigestAuthentication, - "hmacdigest": HmacDigestAuthentication, - "googlelogin": GoogleLoginAuthentication, -} - -AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] - - -class FileCache(object): - """Uses a local directory as a store for cached files. - Not really safe to use if multiple threads or processes are going to - be running on the same cache. 
- """ - - def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior - self.cache = cache - self.safe = safe - if not os.path.exists(cache): - os.makedirs(self.cache) - - def get(self, key): - retval = None - cacheFullPath = os.path.join(self.cache, self.safe(key)) - try: - f = open(cacheFullPath, "rb") - retval = f.read() - f.close() - except IOError: - pass - return retval - - def set(self, key, value): - cacheFullPath = os.path.join(self.cache, self.safe(key)) - f = open(cacheFullPath, "wb") - f.write(value) - f.close() - - def delete(self, key): - cacheFullPath = os.path.join(self.cache, self.safe(key)) - if os.path.exists(cacheFullPath): - os.remove(cacheFullPath) - - -class Credentials(object): - def __init__(self): - self.credentials = [] - - def add(self, name, password, domain=""): - self.credentials.append((domain.lower(), name, password)) - - def clear(self): - self.credentials = [] - - def iter(self, domain): - for (cdomain, name, password) in self.credentials: - if cdomain == "" or domain == cdomain: - yield (name, password) - - -class KeyCerts(Credentials): - """Identical to Credentials except that - name/password are mapped to key/cert.""" - - def add(self, key, cert, domain, password): - self.credentials.append((domain.lower(), key, cert, password)) - - def iter(self, domain): - for (cdomain, key, cert, password) in self.credentials: - if cdomain == "" or domain == cdomain: - yield (key, cert, password) - - -class AllHosts(object): - pass - - -class ProxyInfo(object): - """Collect information required to use a proxy.""" - - bypass_hosts = () - - def __init__( - self, proxy_type, proxy_host, proxy_port, proxy_rdns=True, proxy_user=None, proxy_pass=None, proxy_headers=None, - ): - """Args: - - proxy_type: The type of proxy server. This must be set to one of - socks.PROXY_TYPE_XXX constants. For example: p = - ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', - proxy_port=8000) - proxy_host: The hostname or IP address of the proxy server. - proxy_port: The port that the proxy server is running on. - proxy_rdns: If True (default), DNS queries will not be performed - locally, and instead, handed to the proxy to resolve. This is useful - if the network does not allow resolution of non-local names. In - httplib2 0.9 and earlier, this defaulted to False. - proxy_user: The username used to authenticate with the proxy server. - proxy_pass: The password used to authenticate with the proxy server. - proxy_headers: Additional or modified headers for the proxy connect - request. - """ - if isinstance(proxy_user, bytes): - proxy_user = proxy_user.decode() - if isinstance(proxy_pass, bytes): - proxy_pass = proxy_pass.decode() - ( - self.proxy_type, - self.proxy_host, - self.proxy_port, - self.proxy_rdns, - self.proxy_user, - self.proxy_pass, - self.proxy_headers, - ) = ( - proxy_type, - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) - - def astuple(self): - return ( - self.proxy_type, - self.proxy_host, - self.proxy_port, - self.proxy_rdns, - self.proxy_user, - self.proxy_pass, - self.proxy_headers, - ) - - def isgood(self): - return socks and (self.proxy_host != None) and (self.proxy_port != None) - - def applies_to(self, hostname): - return not self.bypass_host(hostname) - - def bypass_host(self, hostname): - """Has this host been excluded from the proxy config""" - if self.bypass_hosts is AllHosts: - return True - - hostname = "." 
+ hostname.lstrip(".") - for skip_name in self.bypass_hosts: - # *.suffix - if skip_name.startswith(".") and hostname.endswith(skip_name): - return True - # exact match - if hostname == "." + skip_name: - return True - return False - - def __repr__(self): - return ( - "<ProxyInfo type={p.proxy_type} " - "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}" - + " user={p.proxy_user} headers={p.proxy_headers}>" - ).format(p=self) - - -def proxy_info_from_environment(method="http"): - """Read proxy info from the environment variables. - """ - if method not in ("http", "https"): - return - - env_var = method + "_proxy" - url = os.environ.get(env_var, os.environ.get(env_var.upper())) - if not url: - return - return proxy_info_from_url(url, method, noproxy=None) - - -def proxy_info_from_url(url, method="http", noproxy=None): - """Construct a ProxyInfo from a URL (such as http_proxy env var) - """ - url = urllib.parse.urlparse(url) - username = None - password = None - port = None - if "@" in url[1]: - ident, host_port = url[1].split("@", 1) - if ":" in ident: - username, password = ident.split(":", 1) - else: - password = ident - else: - host_port = url[1] - if ":" in host_port: - host, port = host_port.split(":", 1) - else: - host = host_port - - if port: - port = int(port) - else: - port = dict(https=443, http=80)[method] - - proxy_type = 3 # socks.PROXY_TYPE_HTTP - pi = ProxyInfo( - proxy_type=proxy_type, - proxy_host=host, - proxy_port=port, - proxy_user=username or None, - proxy_pass=password or None, - proxy_headers=None, - ) - - bypass_hosts = [] - # If not given an explicit noproxy value, respect values in env vars. - if noproxy is None: - noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", "")) - # Special case: A single '*' character means all hosts should be bypassed. - if noproxy == "*": - bypass_hosts = AllHosts - elif noproxy.strip(): - bypass_hosts = noproxy.split(",") - bypass_hosts = tuple(filter(bool, bypass_hosts)) # To exclude empty string. - - pi.bypass_hosts = bypass_hosts - return pi - - -class HTTPConnectionWithTimeout(http.client.HTTPConnection): - """HTTPConnection subclass that supports timeouts - - HTTPConnection subclass that supports timeouts - - All timeouts are in seconds. If None is passed for timeout then - Python's default timeout for sockets will be used. 
See for example - the docs of socket.setdefaulttimeout(): - http://docs.python.org/library/socket.html#socket.setdefaulttimeout - """ - - def __init__(self, host, port=None, timeout=None, proxy_info=None): - http.client.HTTPConnection.__init__(self, host, port=port, timeout=timeout) - - self.proxy_info = proxy_info - if proxy_info and not isinstance(proxy_info, ProxyInfo): - self.proxy_info = proxy_info("http") - - def connect(self): - """Connect to the host and port specified in __init__.""" - if self.proxy_info and socks is None: - raise ProxiesUnavailableError("Proxy support missing but proxy use was requested!") - if self.proxy_info and self.proxy_info.isgood() and self.proxy_info.applies_to(self.host): - use_proxy = True - ( - proxy_type, - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) = self.proxy_info.astuple() - - host = proxy_host - port = proxy_port - else: - use_proxy = False - - host = self.host - port = self.port - proxy_type = None - - socket_err = None - - for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - try: - if use_proxy: - self.sock = socks.socksocket(af, socktype, proto) - self.sock.setproxy( - proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, - ) - else: - self.sock = socket.socket(af, socktype, proto) - self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - if has_timeout(self.timeout): - self.sock.settimeout(self.timeout) - if self.debuglevel > 0: - print("connect: ({0}, {1}) ************".format(self.host, self.port)) - if use_proxy: - print( - "proxy: {0} ************".format( - str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,)) - ) - ) - - self.sock.connect((self.host, self.port) + sa[2:]) - except socket.error as e: - socket_err = e - if self.debuglevel > 0: - print("connect fail: ({0}, {1})".format(self.host, self.port)) - if use_proxy: - print( - "proxy: {0}".format( - str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,)) - ) - ) - if self.sock: - self.sock.close() - self.sock = None - continue - break - if not self.sock: - raise socket_err - - -class HTTPSConnectionWithTimeout(http.client.HTTPSConnection): - """This class allows communication via SSL. - - All timeouts are in seconds. If None is passed for timeout then - Python's default timeout for sockets will be used. 
See for example - the docs of socket.setdefaulttimeout(): - http://docs.python.org/library/socket.html#socket.setdefaulttimeout - """ - - def __init__( - self, - host, - port=None, - key_file=None, - cert_file=None, - timeout=None, - proxy_info=None, - ca_certs=None, - disable_ssl_certificate_validation=False, - tls_maximum_version=None, - tls_minimum_version=None, - key_password=None, - ): - - self.disable_ssl_certificate_validation = disable_ssl_certificate_validation - self.ca_certs = ca_certs if ca_certs else CA_CERTS - - self.proxy_info = proxy_info - if proxy_info and not isinstance(proxy_info, ProxyInfo): - self.proxy_info = proxy_info("https") - - context = _build_ssl_context( - self.disable_ssl_certificate_validation, - self.ca_certs, - cert_file, - key_file, - maximum_version=tls_maximum_version, - minimum_version=tls_minimum_version, - key_password=key_password, - ) - super(HTTPSConnectionWithTimeout, self).__init__( - host, port=port, timeout=timeout, context=context, - ) - self.key_file = key_file - self.cert_file = cert_file - self.key_password = key_password - - def connect(self): - """Connect to a host on a given (SSL) port.""" - if self.proxy_info and self.proxy_info.isgood() and self.proxy_info.applies_to(self.host): - use_proxy = True - ( - proxy_type, - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) = self.proxy_info.astuple() - - host = proxy_host - port = proxy_port - else: - use_proxy = False - - host = self.host - port = self.port - proxy_type = None - proxy_headers = None - - socket_err = None - - address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM) - for family, socktype, proto, canonname, sockaddr in address_info: - try: - if use_proxy: - sock = socks.socksocket(family, socktype, proto) - - sock.setproxy( - proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, - ) - else: - sock = socket.socket(family, socktype, proto) - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - if has_timeout(self.timeout): - sock.settimeout(self.timeout) - sock.connect((self.host, self.port)) - - self.sock = self._context.wrap_socket(sock, server_hostname=self.host) - - # Python 3.3 compatibility: emulate the check_hostname behavior - if not hasattr(self._context, "check_hostname") and not self.disable_ssl_certificate_validation: - try: - ssl.match_hostname(self.sock.getpeercert(), self.host) - except Exception: - self.sock.shutdown(socket.SHUT_RDWR) - self.sock.close() - raise - - if self.debuglevel > 0: - print("connect: ({0}, {1})".format(self.host, self.port)) - if use_proxy: - print( - "proxy: {0}".format( - str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,)) - ) - ) - except (ssl.SSLError, ssl.CertificateError) as e: - if sock: - sock.close() - if self.sock: - self.sock.close() - self.sock = None - raise - except (socket.timeout, socket.gaierror): - raise - except socket.error as e: - socket_err = e - if self.debuglevel > 0: - print("connect fail: ({0}, {1})".format(self.host, self.port)) - if use_proxy: - print( - "proxy: {0}".format( - str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,)) - ) - ) - if self.sock: - self.sock.close() - self.sock = None - continue - break - if not self.sock: - raise socket_err - - -SCHEME_TO_CONNECTION = { - "http": HTTPConnectionWithTimeout, - "https": HTTPSConnectionWithTimeout, -} - - -class Http(object): - """An HTTP client that handles: - - - all methods - - caching - - ETags - - compression, - - 
HTTPS - - Basic - - Digest - - WSSE - - and more. - """ - - def __init__( - self, - cache=None, - timeout=None, - proxy_info=proxy_info_from_environment, - ca_certs=None, - disable_ssl_certificate_validation=False, - tls_maximum_version=None, - tls_minimum_version=None, - ): - """If 'cache' is a string then it is used as a directory name for - a disk cache. Otherwise it must be an object that supports the - same interface as FileCache. - - All timeouts are in seconds. If None is passed for timeout - then Python's default timeout for sockets will be used. See - for example the docs of socket.setdefaulttimeout(): - http://docs.python.org/library/socket.html#socket.setdefaulttimeout - - `proxy_info` may be: - - a callable that takes the http scheme ('http' or 'https') and - returns a ProxyInfo instance per request. By default, uses - proxy_info_from_environment. - - a ProxyInfo instance (static proxy config). - - None (proxy disabled). - - ca_certs is the path of a file containing root CA certificates for SSL - server certificate validation. By default, a CA cert file bundled with - httplib2 is used. - - If disable_ssl_certificate_validation is true, SSL cert validation will - not be performed. - - tls_maximum_version / tls_minimum_version require Python 3.7+ / - OpenSSL 1.1.0g+. A value of "TLSv1_3" requires OpenSSL 1.1.1+. -""" - self.proxy_info = proxy_info - self.ca_certs = ca_certs - self.disable_ssl_certificate_validation = disable_ssl_certificate_validation - self.tls_maximum_version = tls_maximum_version - self.tls_minimum_version = tls_minimum_version - # Map domain name to an httplib connection - self.connections = {} - # The location of the cache, for now a directory - # where cached responses are held. - if cache and isinstance(cache, str): - self.cache = FileCache(cache) - else: - self.cache = cache - - # Name/password - self.credentials = Credentials() - - # Key/cert - self.certificates = KeyCerts() - - # authorization objects - self.authorizations = [] - - # If set to False then no redirects are followed, even safe ones. - self.follow_redirects = True - - self.redirect_codes = REDIRECT_CODES - - # Which HTTP methods do we apply optimistic concurrency to, i.e. - # which methods get an "if-match:" etag header added to them. - self.optimistic_concurrency_methods = ["PUT", "PATCH"] - - self.safe_methods = list(SAFE_METHODS) - - # If 'follow_redirects' is True, and this is set to True then - # all redirecs are followed, including unsafe ones. - self.follow_all_redirects = False - - self.ignore_etag = False - - self.force_exception_to_status_code = False - - self.timeout = timeout - - # Keep Authorization: headers on a redirect. - self.forward_authorization_headers = False - - def close(self): - """Close persistent connections, clear sensitive data. - Not thread-safe, requires external synchronization against concurrent requests. 
- """ - existing, self.connections = self.connections, {} - for _, c in existing.items(): - c.close() - self.certificates.clear() - self.clear_credentials() - - def __getstate__(self): - state_dict = copy.copy(self.__dict__) - # In case request is augmented by some foreign object such as - # credentials which handle auth - if "request" in state_dict: - del state_dict["request"] - if "connections" in state_dict: - del state_dict["connections"] - return state_dict - - def __setstate__(self, state): - self.__dict__.update(state) - self.connections = {} - - def _auth_from_challenge(self, host, request_uri, headers, response, content): - """A generator that creates Authorization objects - that can be applied to requests. - """ - challenges = auth._parse_www_authenticate(response, "www-authenticate") - for cred in self.credentials.iter(host): - for scheme in AUTH_SCHEME_ORDER: - if scheme in challenges: - yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) - - def add_credentials(self, name, password, domain=""): - """Add a name and password that will be used - any time a request requires authentication.""" - self.credentials.add(name, password, domain) - - def add_certificate(self, key, cert, domain, password=None): - """Add a key and cert that will be used - any time a request requires authentication.""" - self.certificates.add(key, cert, domain, password) - - def clear_credentials(self): - """Remove all the names and passwords - that are used for authentication""" - self.credentials.clear() - self.authorizations = [] - - def _conn_request(self, conn, request_uri, method, body, headers): - i = 0 - seen_bad_status_line = False - while i < RETRIES: - i += 1 - try: - if conn.sock is None: - conn.connect() - conn.request(method, request_uri, body, headers) - except socket.timeout: - conn.close() - raise - except socket.gaierror: - conn.close() - raise ServerNotFoundError("Unable to find the server at %s" % conn.host) - except socket.error as e: - errno_ = e.args[0].errno if isinstance(e.args[0], socket.error) else e.errno - if errno_ in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES: - continue # retry on potentially transient errors - raise - except http.client.HTTPException: - if conn.sock is None: - if i < RETRIES - 1: - conn.close() - conn.connect() - continue - else: - conn.close() - raise - if i < RETRIES - 1: - conn.close() - conn.connect() - continue - # Just because the server closed the connection doesn't apparently mean - # that the server didn't send a response. - pass - try: - response = conn.getresponse() - except (http.client.BadStatusLine, http.client.ResponseNotReady): - # If we get a BadStatusLine on the first try then that means - # the connection just went stale, so retry regardless of the - # number of RETRIES set. 
- if not seen_bad_status_line and i == 1: - i = 0 - seen_bad_status_line = True - conn.close() - conn.connect() - continue - else: - conn.close() - raise - except socket.timeout: - raise - except (socket.error, http.client.HTTPException): - conn.close() - if i == 0: - conn.close() - conn.connect() - continue - else: - raise - else: - content = b"" - if method == "HEAD": - conn.close() - else: - content = response.read() - response = Response(response) - if method != "HEAD": - content = _decompressContent(response, content) - - break - return (response, content) - - def _request( - self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey, - ): - """Do the actual request using the connection object - and also follow one level of redirects if necessary""" - - auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] - auth = auths and sorted(auths)[0][1] or None - if auth: - auth.request(method, request_uri, headers, body) - - (response, content) = self._conn_request(conn, request_uri, method, body, headers) - - if auth: - if auth.response(response, body): - auth.request(method, request_uri, headers, body) - (response, content) = self._conn_request(conn, request_uri, method, body, headers) - response._stale_digest = 1 - - if response.status == 401: - for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): - authorization.request(method, request_uri, headers, body) - (response, content) = self._conn_request(conn, request_uri, method, body, headers) - if response.status != 401: - self.authorizations.append(authorization) - authorization.response(response, body) - break - - if self.follow_all_redirects or method in self.safe_methods or response.status in (303, 308): - if self.follow_redirects and response.status in self.redirect_codes: - # Pick out the location header and basically start from the beginning - # remembering first to strip the ETag header and decrement our 'depth' - if redirections: - if "location" not in response and response.status != 300: - raise RedirectMissingLocation( - _("Redirected but the response is missing a Location: header."), response, content, - ) - # Fix-up relative redirects (which violate an RFC 2616 MUST) - if "location" in response: - location = response["location"] - (scheme, authority, path, query, fragment) = parse_uri(location) - if authority == None: - response["location"] = urllib.parse.urljoin(absolute_uri, location) - if response.status == 308 or (response.status == 301 and (method in self.safe_methods)): - response["-x-permanent-redirect-url"] = response["location"] - if "content-location" not in response: - response["content-location"] = absolute_uri - _updateCache(headers, response, content, self.cache, cachekey) - if "if-none-match" in headers: - del headers["if-none-match"] - if "if-modified-since" in headers: - del headers["if-modified-since"] - if "authorization" in headers and not self.forward_authorization_headers: - del headers["authorization"] - if "location" in response: - location = response["location"] - old_response = copy.deepcopy(response) - if "content-location" not in old_response: - old_response["content-location"] = absolute_uri - redirect_method = method - if response.status in [302, 303]: - redirect_method = "GET" - body = None - (response, content) = self.request( - location, method=redirect_method, body=body, headers=headers, redirections=redirections - 1, - ) - response.previous = old_response - else: - 
raise RedirectLimit( - "Redirected more times than redirection_limit allows.", response, content, - ) - elif response.status in [200, 203] and method in self.safe_methods: - # Don't cache 206's since we aren't going to handle byte range requests - if "content-location" not in response: - response["content-location"] = absolute_uri - _updateCache(headers, response, content, self.cache, cachekey) - - return (response, content) - - def _normalize_headers(self, headers): - return _normalize_headers(headers) - - # Need to catch and rebrand some exceptions - # Then need to optionally turn all exceptions into status codes - # including all socket.* and httplib.* exceptions. - - def request( - self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None, - ): - """ Performs a single HTTP request. -The 'uri' is the URI of the HTTP resource and can begin -with either 'http' or 'https'. The value of 'uri' must be an absolute URI. - -The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. -There is no restriction on the methods allowed. - -The 'body' is the entity body to be sent with the request. It is a string -object. - -Any extra headers that are to be sent with the request should be provided in the -'headers' dictionary. - -The maximum number of redirect to follow before raising an -exception is 'redirections. The default is 5. - -The return value is a tuple of (response, content), the first -being and instance of the 'Response' class, the second being -a string that contains the response entity body. - """ - conn_key = "" - - try: - if headers is None: - headers = {} - else: - headers = self._normalize_headers(headers) - - if "user-agent" not in headers: - headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__ - - uri = iri2uri(uri) - # Prevent CWE-75 space injection to manipulate request via part of uri. - # Prevent CWE-93 CRLF injection to modify headers via part of uri. 
- uri = uri.replace(" ", "%20").replace("\r", "%0D").replace("\n", "%0A") - - (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) - - conn_key = scheme + ":" + authority - conn = self.connections.get(conn_key) - if conn is None: - if not connection_type: - connection_type = SCHEME_TO_CONNECTION[scheme] - certs = list(self.certificates.iter(authority)) - if issubclass(connection_type, HTTPSConnectionWithTimeout): - if certs: - conn = self.connections[conn_key] = connection_type( - authority, - key_file=certs[0][0], - cert_file=certs[0][1], - timeout=self.timeout, - proxy_info=self.proxy_info, - ca_certs=self.ca_certs, - disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, - tls_maximum_version=self.tls_maximum_version, - tls_minimum_version=self.tls_minimum_version, - key_password=certs[0][2], - ) - else: - conn = self.connections[conn_key] = connection_type( - authority, - timeout=self.timeout, - proxy_info=self.proxy_info, - ca_certs=self.ca_certs, - disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, - tls_maximum_version=self.tls_maximum_version, - tls_minimum_version=self.tls_minimum_version, - ) - else: - conn = self.connections[conn_key] = connection_type( - authority, timeout=self.timeout, proxy_info=self.proxy_info - ) - conn.set_debuglevel(debuglevel) - - if "range" not in headers and "accept-encoding" not in headers: - headers["accept-encoding"] = "gzip, deflate" - - info = email.message.Message() - cachekey = None - cached_value = None - if self.cache: - cachekey = defrag_uri - cached_value = self.cache.get(cachekey) - if cached_value: - try: - info, content = cached_value.split(b"\r\n\r\n", 1) - info = email.message_from_bytes(info) - for k, v in info.items(): - if v.startswith("=?") and v.endswith("?="): - info.replace_header(k, str(*email.header.decode_header(v)[0])) - except (IndexError, ValueError): - self.cache.delete(cachekey) - cachekey = None - cached_value = None - - if ( - method in self.optimistic_concurrency_methods - and self.cache - and "etag" in info - and not self.ignore_etag - and "if-match" not in headers - ): - # http://www.w3.org/1999/04/Editing/ - headers["if-match"] = info["etag"] - - # https://tools.ietf.org/html/rfc7234 - # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location - # when a non-error status code is received in response to an unsafe request method. - if self.cache and cachekey and method not in self.safe_methods: - self.cache.delete(cachekey) - - # Check the vary header in the cache to see if this request - # matches what varies in the cache. - if method in self.safe_methods and "vary" in info: - vary = info["vary"] - vary_headers = vary.lower().replace(" ", "").split(",") - for header in vary_headers: - key = "-varied-%s" % header - value = info[key] - if headers.get(header, None) != value: - cached_value = None - break - - if ( - self.cache - and cached_value - and (method in self.safe_methods or info["status"] == "308") - and "range" not in headers - ): - redirect_method = method - if info["status"] not in ("307", "308"): - redirect_method = "GET" - if "-x-permanent-redirect-url" in info: - # Should cached permanent redirects be counted in our redirection count? For now, yes. 
- if redirections <= 0: - raise RedirectLimit( - "Redirected more times than redirection_limit allows.", {}, "", - ) - (response, new_content) = self.request( - info["-x-permanent-redirect-url"], - method=redirect_method, - headers=headers, - redirections=redirections - 1, - ) - response.previous = Response(info) - response.previous.fromcache = True - else: - # Determine our course of action: - # Is the cached entry fresh or stale? - # Has the client requested a non-cached response? - # - # There seems to be three possible answers: - # 1. [FRESH] Return the cache entry w/o doing a GET - # 2. [STALE] Do the GET (but add in cache validators if available) - # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request - entry_disposition = _entry_disposition(info, headers) - - if entry_disposition == "FRESH": - if not cached_value: - info["status"] = "504" - content = b"" - response = Response(info) - if cached_value: - response.fromcache = True - return (response, content) - - if entry_disposition == "STALE": - if "etag" in info and not self.ignore_etag and not "if-none-match" in headers: - headers["if-none-match"] = info["etag"] - if "last-modified" in info and not "last-modified" in headers: - headers["if-modified-since"] = info["last-modified"] - elif entry_disposition == "TRANSPARENT": - pass - - (response, new_content) = self._request( - conn, authority, uri, request_uri, method, body, headers, redirections, cachekey, - ) - - if response.status == 304 and method == "GET": - # Rewrite the cache entry with the new end-to-end headers - # Take all headers that are in response - # and overwrite their values in info. - # unless they are hop-by-hop, or are listed in the connection header. - - for key in _get_end2end_headers(response): - info[key] = response[key] - merged_response = Response(info) - if hasattr(response, "_stale_digest"): - merged_response._stale_digest = response._stale_digest - _updateCache(headers, merged_response, content, self.cache, cachekey) - response = merged_response - response.status = 200 - response.fromcache = True - - elif response.status == 200: - content = new_content - else: - self.cache.delete(cachekey) - content = new_content - else: - cc = _parse_cache_control(headers) - if "only-if-cached" in cc: - info["status"] = "504" - response = Response(info) - content = b"" - else: - (response, content) = self._request( - conn, authority, uri, request_uri, method, body, headers, redirections, cachekey, - ) - except Exception as e: - is_timeout = isinstance(e, socket.timeout) - if is_timeout: - conn = self.connections.pop(conn_key, None) - if conn: - conn.close() - - if self.force_exception_to_status_code: - if isinstance(e, HttpLib2ErrorWithResponse): - response = e.response - content = e.content - response.status = 500 - response.reason = str(e) - elif isinstance(e, socket.timeout): - content = b"Request Timeout" - response = Response({"content-type": "text/plain", "status": "408", "content-length": len(content),}) - response.reason = "Request Timeout" - else: - content = str(e).encode("utf-8") - response = Response({"content-type": "text/plain", "status": "400", "content-length": len(content),}) - response.reason = "Bad Request" - else: - raise - - return (response, content) - - -class Response(dict): - """An object more like email.message than httplib.HTTPResponse.""" - - """Is this response from our local cache""" - fromcache = False - """HTTP protocol version used by server. - - 10 for HTTP/1.0, 11 for HTTP/1.1. 
- """ - version = 11 - - "Status code returned by server. " - status = 200 - """Reason phrase returned by server.""" - reason = "Ok" - - previous = None - - def __init__(self, info): - # info is either an email.message or - # an httplib.HTTPResponse object. - if isinstance(info, http.client.HTTPResponse): - for key, value in info.getheaders(): - key = key.lower() - prev = self.get(key) - if prev is not None: - value = ", ".join((prev, value)) - self[key] = value - self.status = info.status - self["status"] = str(self.status) - self.reason = info.reason - self.version = info.version - elif isinstance(info, email.message.Message): - for key, value in list(info.items()): - self[key.lower()] = value - self.status = int(self["status"]) - else: - for key, value in info.items(): - self[key.lower()] = value - self.status = int(self.get("status", self.status)) - - def __getattr__(self, name): - if name == "dict": - return self - else: - raise AttributeError(name) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/auth.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/auth.py deleted file mode 100755 index 84b5831..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/auth.py +++ /dev/null @@ -1,63 +0,0 @@ -import base64 -import re - -import pyparsing as pp - -from .error import * - -UNQUOTE_PAIRS = re.compile(r"\\(.)") -unquote = lambda s, l, t: UNQUOTE_PAIRS.sub(r"\1", t[0][1:-1]) - -# https://tools.ietf.org/html/rfc7235#section-1.2 -# https://tools.ietf.org/html/rfc7235#appendix-B -tchar = "!#$%&'*+-.^_`|~" + pp.nums + pp.alphas -token = pp.Word(tchar).setName("token") -token68 = pp.Combine(pp.Word("-._~+/" + pp.nums + pp.alphas) + pp.Optional(pp.Word("=").leaveWhitespace())).setName( - "token68" -) - -quoted_string = pp.dblQuotedString.copy().setName("quoted-string").setParseAction(unquote) -auth_param_name = token.copy().setName("auth-param-name").addParseAction(pp.downcaseTokens) -auth_param = auth_param_name + pp.Suppress("=") + (quoted_string | token) -params = pp.Dict(pp.delimitedList(pp.Group(auth_param))) - -scheme = token("scheme") -challenge = scheme + (params("params") | token68("token")) - -authentication_info = params.copy() -www_authenticate = pp.delimitedList(pp.Group(challenge)) - - -def _parse_authentication_info(headers, headername="authentication-info"): - """https://tools.ietf.org/html/rfc7615 - """ - header = headers.get(headername, "").strip() - if not header: - return {} - try: - parsed = authentication_info.parseString(header) - except pp.ParseException as ex: - # print(ex.explain(ex)) - raise MalformedHeader(headername) - - return parsed.asDict() - - -def _parse_www_authenticate(headers, headername="www-authenticate"): - """Returns a dictionary of dictionaries, one dict per auth_scheme.""" - header = headers.get(headername, "").strip() - if not header: - return {} - try: - parsed = www_authenticate.parseString(header) - except pp.ParseException as ex: - # print(ex.explain(ex)) - raise MalformedHeader(headername) - - retval = { - challenge["scheme"].lower(): challenge["params"].asDict() - if "params" in challenge - else {"token": challenge.get("token")} - for challenge in parsed - } - return retval diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/cacerts.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/cacerts.txt deleted file mode 100644 index 8020c1b..0000000 --- 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/cacerts.txt +++ /dev/null @@ -1,2197 +0,0 @@ -# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc. -# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc. -# Label: "GTE CyberTrust Global Root" -# Serial: 421 -# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db -# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74 -# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36 ------BEGIN CERTIFICATE----- -MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD -VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv -bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv -b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV -UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU -cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds -b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH -iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS -r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4 -04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r -GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9 -3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P -lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ ------END CERTIFICATE----- - -# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division -# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division -# Label: "Thawte Server CA" -# Serial: 1 -# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d -# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c -# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9 ------BEGIN CERTIFICATE----- -MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx -FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD -VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv -biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm -MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx -MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT -DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3 -dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl -cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3 -DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD -gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91 -yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX -L+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj -EzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG -7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e -QNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ -qdq5snUb9kLy78fyGPmJvKP/iiMucEc= ------END CERTIFICATE----- - -# Issuer: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division -# Subject: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division -# Label: "Thawte Premium Server CA" -# Serial: 1 -# MD5 Fingerprint: 06:9f:69:79:16:66:90:02:1b:8c:8c:a2:c3:07:6f:3a -# SHA1 Fingerprint: 62:7f:8d:78:27:65:63:99:d2:7d:7f:90:44:c9:fe:b3:f3:3e:fa:9a -# 
SHA256 Fingerprint: ab:70:36:36:5c:71:54:aa:29:c2:c2:9f:5d:41:91:16:3b:16:2a:22:25:01:13:57:d5:6d:07:ff:a7:bc:1f:72 ------BEGIN CERTIFICATE----- -MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx -FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD -VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv -biBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy -dmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t -MB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB -MRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG -A1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp -b24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl -cnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv -bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE -VdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ -ug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR -uHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG -9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI -hfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM -pAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg== ------END CERTIFICATE----- - -# Issuer: O=Equifax OU=Equifax Secure Certificate Authority -# Subject: O=Equifax OU=Equifax Secure Certificate Authority -# Label: "Equifax Secure CA" -# Serial: 903804111 -# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4 -# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a -# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78 ------BEGIN CERTIFICATE----- -MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV -UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy -dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1 -MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx -dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B -AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f -BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A -cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC -AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ -MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm -aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw -ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj -IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF -MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA -A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y -7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh -1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4 ------END CERTIFICATE----- - -# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network -# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. 
- For authorized use only/VeriSign Trust Network -# Label: "Verisign Class 3 Public Primary Certification Authority - G2" -# Serial: 167285380242319648451154478808036881606 -# MD5 Fingerprint: a2:33:9b:4c:74:78:73:d4:6c:e7:c1:f3:8d:cb:5c:e9 -# SHA1 Fingerprint: 85:37:1c:a6:e5:50:14:3d:ce:28:03:47:1b:de:3a:09:e8:f8:77:0f -# SHA256 Fingerprint: 83:ce:3c:12:29:68:8a:59:3d:48:5f:81:97:3c:0f:91:95:43:1e:da:37:cc:5e:36:43:0e:79:c7:a8:88:63:8b ------BEGIN CERTIFICATE----- -MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ -BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh -c3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy -MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp -emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X -DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw -FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg -UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo -YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5 -MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB -AQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4 -pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0 -13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID -AQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk -U01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i -F6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY -oJ2daZH9 ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA -# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA -# Label: "GlobalSign Root CA" -# Serial: 4835703278459707669005204 -# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a -# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c -# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 ------BEGIN CERTIFICATE----- -MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG -A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv -b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw -MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i -YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT -aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ -jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp -xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp -1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG -snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ -U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 -9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E -BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B -AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz -yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE -38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP -AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad -DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME -HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Label: "GlobalSign Root CA - R2" -# Serial: 4835703278459682885658125 -# MD5 Fingerprint: 
94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 -# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe -# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e ------BEGIN CERTIFICATE----- -MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G -A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp -Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 -MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG -A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL -v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 -eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq -tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd -C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa -zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB -mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH -V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n -bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG -3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs -J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO -291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS -ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd -AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 -TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== ------END CERTIFICATE----- - -# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority -# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority -# Label: "ValiCert Class 1 VA" -# Serial: 1 -# MD5 Fingerprint: 65:58:ab:15:ad:57:6c:1e:a8:a7:b5:69:ac:bf:ff:eb -# SHA1 Fingerprint: e5:df:74:3c:b6:01:c4:9b:98:43:dc:ab:8c:e8:6a:81:10:9f:e4:8e -# SHA256 Fingerprint: f4:c1:49:55:1a:30:13:a3:5b:c7:bf:fe:17:a7:f3:44:9b:c1:ab:5b:5a:0a:e7:4b:06:c2:3b:90:00:4c:01:04 ------BEGIN CERTIFICATE----- -MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0 -IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz -BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y -aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG -9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIyMjM0OFoXDTE5MDYy -NTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y -azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs -YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw -Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl -cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9Y -LqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIiGQj4/xEjm84H9b9pGib+ -TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCmDuJWBQ8Y -TfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0 -LBwGlN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLW -I8sogTLDAHkY7FkXicnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPw -nXS3qT6gpf+2SQMT2iLM7XGCK5nPOrf1LXLI ------END CERTIFICATE----- - -# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority -# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. 
OU=ValiCert Class 2 Policy Validation Authority -# Label: "ValiCert Class 2 VA" -# Serial: 1 -# MD5 Fingerprint: a9:23:75:9b:ba:49:36:6e:31:c2:db:f2:e7:66:ba:87 -# SHA1 Fingerprint: 31:7a:2a:d0:7f:2b:33:5e:f5:a1:c3:4e:4b:57:e8:b7:d8:f1:fc:a6 -# SHA256 Fingerprint: 58:d0:17:27:9c:d4:dc:63:ab:dd:b1:96:a6:c9:90:6c:30:c4:e0:87:83:ea:e8:c1:60:99:54:d6:93:55:59:6b ------BEGIN CERTIFICATE----- -MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0 -IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz -BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y -aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG -9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy -NjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y -azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs -YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw -Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl -cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY -dA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9 -WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS -v4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v -UJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu -IYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC -W/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd ------END CERTIFICATE----- - -# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority -# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority -# Label: "RSA Root Certificate 1" -# Serial: 1 -# MD5 Fingerprint: a2:6f:53:b7:ee:40:db:4a:68:e7:fa:18:d9:10:4b:72 -# SHA1 Fingerprint: 69:bd:8c:f4:9c:d3:00:fb:59:2e:17:93:ca:55:6a:f3:ec:aa:35:fb -# SHA256 Fingerprint: bc:23:f9:8a:31:3c:b9:2d:e3:bb:fc:3a:5a:9f:44:61:ac:39:49:4c:4a:e1:5a:9e:9d:f1:31:e9:9b:73:01:9a ------BEGIN CERTIFICATE----- -MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0 -IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz -BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y -aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG -9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy -NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y -azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs -YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw -Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl -cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD -cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs -2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY -JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE -Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ -n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A -PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only -# Label: "Verisign Class 3 Public Primary Certification Authority - G3" -# Serial: 206684696279472310254277870180966723415 -# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 -# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 -# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 ------BEGIN CERTIFICATE----- -MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw -CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl -cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu -LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT -aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD -VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT -aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ -bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu -IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b -N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t -KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu -kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm -CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ -Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu -imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te -2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe -DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC -/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p -F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt -TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only -# Label: "Verisign Class 4 Public Primary Certification Authority - G3" -# Serial: 314531972711909413743075096039378935511 -# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df -# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d -# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06 ------BEGIN CERTIFICATE----- -MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw -CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl -cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu -LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT -aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD -VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT -aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ -bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu -IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1 -GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ -+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd -U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm -NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY -ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ -ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1 -CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq -g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm -fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c -2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/ -bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== ------END CERTIFICATE----- - -# Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited -# Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited -# Label: "Entrust.net Secure Server CA" -# Serial: 927650371 -# MD5 Fingerprint: df:f2:80:73:cc:f1:e6:61:73:fc:f5:42:e9:c5:7c:ee -# SHA1 Fingerprint: 99:a6:9b:e6:1a:fe:88:6b:4d:2b:82:00:7c:b8:54:fc:31:7e:15:39 -# SHA256 Fingerprint: 62:f2:40:27:8c:56:4c:4d:d8:bf:7d:9d:4f:6f:36:6e:a8:94:d2:2f:5f:34:d9:89:a9:83:ac:ec:2f:ff:ed:50 ------BEGIN CERTIFICATE----- -MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMC -VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5u -ZXQvQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMc -KGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5u -ZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05OTA1 -MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIGA1UE -ChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5j -b3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF -bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUg -U2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUA -A4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQaO2f55M28Qpku0f1BBc/ -I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5gXpa0zf3 -wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OC -AdcwggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHb -oIHYpIHVMIHSMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5 -BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p -dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1pdGVk -MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp -b24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu -dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0 -MFqBDzIwMTkwNTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8Bdi -E1U9s/8KAGv7UISX8+1i0BowHQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAa -MAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EABAwwChsEVjQuMAMCBJAwDQYJKoZI -hvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN -95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd -2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= ------END CERTIFICATE----- - -# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited -# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited -# Label: "Entrust.net Premium 2048 Secure Server CA" -# Serial: 946059622 -# MD5 Fingerprint: ba:21:ea:20:d6:dd:db:8f:c1:57:8b:40:ad:a1:fc:fc -# SHA1 Fingerprint: 80:1d:62:d0:7b:44:9d:5c:5c:03:5c:98:ea:61:fa:44:3c:2a:58:fe -# SHA256 Fingerprint: d1:c3:39:ea:27:84:eb:87:0f:93:4f:c5:63:4e:4a:a9:ad:55:05:01:64:01:f2:64:65:d3:7a:57:46:63:35:9f ------BEGIN CERTIFICATE----- -MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML -RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp -bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 -IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy -MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 -LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp -YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG -A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq -K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe -sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX -MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT -XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ -HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH -4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA -vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G -CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA -WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo -oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ -h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18 -f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN -B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy -vUxFnmG6v4SBkgPR0ml8xQ== ------END CERTIFICATE----- - -# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust -# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust -# Label: "Baltimore CyberTrust Root" -# Serial: 33554617 -# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 -# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 -# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb ------BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ -RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD -VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX -DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y -ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy -VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr -mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr -IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK -mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu -XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy -dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye -jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 -BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 -DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 -9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx -jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 
-Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz -ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS -R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp ------END CERTIFICATE----- - -# Issuer: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. -# Subject: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. -# Label: "Equifax Secure Global eBusiness CA" -# Serial: 1 -# MD5 Fingerprint: 8f:5d:77:06:27:c4:98:3c:5b:93:78:e7:d7:7d:9b:cc -# SHA1 Fingerprint: 7e:78:4a:10:1c:82:65:cc:2d:e1:f1:6d:47:b4:40:ca:d9:0a:19:45 -# SHA256 Fingerprint: 5f:0b:62:ea:b5:e3:53:ea:65:21:65:16:58:fb:b6:53:59:f4:43:28:0a:4a:fb:d1:04:d7:7d:10:f9:f0:4c:07 ------BEGIN CERTIFICATE----- -MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc -MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT -ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw -MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj -dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l -c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC -UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc -58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/ -o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH -MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr -aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA -A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA -Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv -8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV ------END CERTIFICATE----- - -# Issuer: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc. -# Subject: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc. -# Label: "Equifax Secure eBusiness CA 1" -# Serial: 4 -# MD5 Fingerprint: 64:9c:ef:2e:44:fc:c6:8f:52:07:d0:51:73:8f:cb:3d -# SHA1 Fingerprint: da:40:18:8b:91:89:a3:ed:ee:ae:da:97:fe:2f:9d:f5:b7:d1:8a:41 -# SHA256 Fingerprint: cf:56:ff:46:a4:a1:86:10:9d:d9:65:84:b5:ee:b5:8a:51:0c:42:75:b0:e5:f9:4f:40:bb:ae:86:5e:19:f6:73 ------BEGIN CERTIFICATE----- -MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc -MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT -ZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw -MDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j -LjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ -KoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo -RvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu -WqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw -Env+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD -AQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK -eDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM -zfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+ -WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN -/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ== ------END CERTIFICATE----- - -# Issuer: O=Equifax Secure OU=Equifax Secure eBusiness CA-2 -# Subject: O=Equifax Secure OU=Equifax Secure eBusiness CA-2 -# Label: "Equifax Secure eBusiness CA 2" -# Serial: 930140085 -# MD5 Fingerprint: aa:bf:bf:64:97:da:98:1d:6f:c6:08:3a:95:70:33:ca -# SHA1 Fingerprint: 39:4f:f6:85:0b:06:be:52:e5:18:56:cc:10:e1:80:e8:82:b3:85:cc -# SHA256 Fingerprint: 2f:27:4e:48:ab:a4:ac:7b:76:59:33:10:17:75:50:6d:c3:0e:e3:8e:f6:ac:d5:c0:49:32:cf:e0:41:23:42:20 ------BEGIN CERTIFICATE----- 
-MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV -UzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2Vj -dXJlIGVCdXNpbmVzcyBDQS0yMB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0 -NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkVxdWlmYXggU2VjdXJlMSYwJAYD -VQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCBnzANBgkqhkiG9w0B -AQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn2Z0G -vxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/ -BPO3QSQ5BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0C -AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEX -MBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJl -IGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTkw -NjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9euSBIplBq -y/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQF -MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA -A4GBAAyGgq3oThr1jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy -0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1 -E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUmV+GRMOrN ------END CERTIFICATE----- - -# Issuer: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network -# Subject: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network -# Label: "AddTrust Low-Value Services Root" -# Serial: 1 -# MD5 Fingerprint: 1e:42:95:02:33:92:6b:b9:5f:c0:7f:da:d6:b2:4b:fc -# SHA1 Fingerprint: cc:ab:0e:a0:4c:23:01:d6:69:7b:dd:37:9f:cd:12:eb:24:e3:94:9d -# SHA256 Fingerprint: 8c:72:09:27:9a:c0:4e:27:5e:16:d0:7f:d3:b7:75:e8:01:54:b5:96:80:46:e3:1f:52:dd:25:76:63:24:e9:a7 ------BEGIN CERTIFICATE----- -MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU -MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 -b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw -MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML -QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD -VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul -CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n -tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl -dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch -PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC -+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O -BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E -BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk -ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB -IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X -7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz -43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY -eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl -pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA -WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= ------END CERTIFICATE----- - -# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network -# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network -# Label: "AddTrust External Root" -# Serial: 1 -# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f -# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 -# SHA256 Fingerprint: 
68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 ------BEGIN CERTIFICATE----- -MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU -MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs -IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 -MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux -FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h -bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v -dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt -H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 -uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX -mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX -a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN -E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 -WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD -VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 -Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU -cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx -IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN -AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH -YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 -6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC -Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX -c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a -mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= ------END CERTIFICATE----- - -# Issuer: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network -# Subject: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network -# Label: "AddTrust Public Services Root" -# Serial: 1 -# MD5 Fingerprint: c1:62:3e:23:c5:82:73:9c:03:59:4b:2b:e9:77:49:7f -# SHA1 Fingerprint: 2a:b6:28:48:5e:78:fb:f3:ad:9e:79:10:dd:6b:df:99:72:2c:96:e5 -# SHA256 Fingerprint: 07:91:ca:07:49:b2:07:82:aa:d3:c7:d7:bd:0c:df:c9:48:58:35:84:3e:b2:d7:99:60:09:ce:43:ab:6c:69:27 ------BEGIN CERTIFICATE----- -MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEU -MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 -b3JrMSAwHgYDVQQDExdBZGRUcnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAx -MDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtB -ZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIDAeBgNV -BAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV -6tsfSlbunyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nX -GCwwfQ56HmIexkvA/X1id9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnP -dzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSGAa2Il+tmzV7R/9x98oTaunet3IAIx6eH -1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAwHM+A+WD+eeSI8t0A65RF -62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0GA1UdDgQW -BBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUw -AwEB/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDEL -MAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRU -cnVzdCBUVFAgTmV0d29yazEgMB4GA1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJv -b3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4JNojVhaTdt02KLmuG7jD8WS6 -IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL+YPoRNWyQSW/ -iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao -GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh -4SINhwBk/ox9Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQm 
-XiLsks3/QppEIW1cxeMiHV9HEufOX1362KqxMy3ZdvJOOjMMK7MtkAY= ------END CERTIFICATE----- - -# Issuer: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network -# Subject: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network -# Label: "AddTrust Qualified Certificates Root" -# Serial: 1 -# MD5 Fingerprint: 27:ec:39:47:cd:da:5a:af:e2:9a:01:65:21:a9:4c:bb -# SHA1 Fingerprint: 4d:23:78:ec:91:95:39:b5:00:7f:75:8f:03:3b:21:1e:c5:4d:8b:cf -# SHA256 Fingerprint: 80:95:21:08:05:db:4b:bc:35:5e:44:28:d8:fd:6e:c2:cd:e3:ab:5f:b9:7a:99:42:98:8e:b8:f4:dc:d0:60:16 ------BEGIN CERTIFICATE----- -MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEU -MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 -b3JrMSMwIQYDVQQDExpBZGRUcnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1 -MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcxCzAJBgNVBAYTAlNFMRQwEgYDVQQK -EwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIzAh -BgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwq -xBb/4Oxx64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G -87B4pfYOQnrjfxvM0PC3KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i -2O+tCBGaKZnhqkRFmhJePp1tUvznoD1oL/BLcHwTOK28FSXx1s6rosAx1i+f4P8U -WfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GRwVY18BTcZTYJbqukB8c1 -0cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HUMIHRMB0G -A1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0T -AQH/BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6Fr -pGkwZzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQL -ExRBZGRUcnVzdCBUVFAgTmV0d29yazEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlm -aWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBABmrder4i2VhlRO6aQTv -hsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxGGuoYQ992zPlm -hpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X -dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3 -P6CxB9bpT9zeRXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9Y -iQBCYz95OdBEsIJuQRno3eDBiFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5no -xqE= ------END CERTIFICATE----- - -# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. -# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
-# Label: "Entrust Root Certification Authority" -# Serial: 1164660820 -# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 -# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 -# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c ------BEGIN CERTIFICATE----- -MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC -VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 -Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW -KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl -cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw -NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw -NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy -ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV -BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo -Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 -4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 -KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI -rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi -94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB -sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi -gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo -kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE -vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA -A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t -O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua -AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP -9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ -eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m -0vdXcDazv/wor3ElhVsT/h5/WrQ8 ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. -# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. 
-# Label: "GeoTrust Global CA" -# Serial: 144470 -# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 -# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 -# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a ------BEGIN CERTIFICATE----- -MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT -MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i -YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG -EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg -R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 -9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq -fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv -iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU -1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ -bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW -MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA -ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l -uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn -Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS -tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF -PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un -hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV -5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Global CA 2 O=GeoTrust Inc. -# Subject: CN=GeoTrust Global CA 2 O=GeoTrust Inc. -# Label: "GeoTrust Global CA 2" -# Serial: 1 -# MD5 Fingerprint: 0e:40:a7:6c:de:03:5d:8f:d1:0f:e4:d1:8d:f9:6c:a9 -# SHA1 Fingerprint: a9:e9:78:08:14:37:58:88:f2:05:19:b0:6d:2b:0d:2b:60:16:90:7d -# SHA256 Fingerprint: ca:2d:82:a0:86:77:07:2f:8a:b6:76:4f:f0:35:67:6c:fe:3e:5e:32:5e:01:21:72:df:3f:92:09:6d:b7:9b:85 ------BEGIN CERTIFICATE----- -MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEW -MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFs -IENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQG -EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3Qg -R2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvPE1A -PRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/NTL8 -Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hL -TytCOb1kLUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL -5mkWRxHCJ1kDs6ZgwiFAVvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7 -S4wMcoKK+xfNAGw6EzywhIdLFnopsk/bHdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe -2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE -FHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNHK266ZUap -EBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6td -EPx7srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv -/NgdRN3ggX+d6YvhZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywN -A0ZF66D0f0hExghAzN4bcLUprbqLOzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0 -abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkCx1YAzUm5s2x7UwQa4qjJqhIF -I8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqFH4z1Ir+rzoPz -4iIprn2DQKi6bA== ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. -# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. 
-# Label: "GeoTrust Universal CA" -# Serial: 1 -# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 -# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 -# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 ------BEGIN CERTIFICATE----- -MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW -MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy -c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE -BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 -IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV -VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 -cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT -QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh -F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v -c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w -mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd -VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX -teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ -f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe -Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ -nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB -/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY -MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG -9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc -aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX -IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn -ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z -uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN -Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja -QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW -koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 -ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt -DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm -bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. -# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. 
-# Label: "GeoTrust Universal CA 2" -# Serial: 1 -# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 -# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 -# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b ------BEGIN CERTIFICATE----- -MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW -MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy -c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD -VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 -c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 -WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG -FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq -XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL -se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb -KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd -IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 -y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt -hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc -QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 -Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV -HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV -HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ -KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z -dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ -L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr -Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo -ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY -T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz -GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m -1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV -OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH -6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX -QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS ------END CERTIFICATE----- - -# Issuer: CN=America Online Root Certification Authority 1 O=America Online Inc. -# Subject: CN=America Online Root Certification Authority 1 O=America Online Inc. 
-# Label: "America Online Root Certification Authority 1" -# Serial: 1 -# MD5 Fingerprint: 14:f1:08:ad:9d:fa:64:e2:89:e7:1c:cf:a8:ad:7d:5e -# SHA1 Fingerprint: 39:21:c1:15:c1:5d:0e:ca:5c:cb:5b:c4:f0:7d:21:d8:05:0b:56:6a -# SHA256 Fingerprint: 77:40:73:12:c6:3a:15:3d:5b:c0:0b:4e:51:75:9c:df:da:c2:37:dc:2a:33:b6:79:46:e9:8e:9b:fa:68:0a:e3 ------BEGIN CERTIFICATE----- -MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc -MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP -bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2 -MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft -ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg -Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lk -hsmj76CGv2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym -1BW32J/X3HGrfpq/m44zDyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsW -OqMFf6Dch9Wc/HKpoH145LcxVR5lu9RhsCFg7RAycsWSJR74kEoYeEfffjA3PlAb -2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP8c9GsEsPPt2IYriMqQko -O3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0TAQH/BAUw -AwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAU -AK3Zo/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB -BQUAA4IBAQB8itEfGDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkF -Zu90821fnZmv9ov761KyBZiibyrFVL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAb -LjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft3OJvx8Fi8eNy1gTIdGcL+oir -oQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43gKd8hdIaC2y+C -MMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds -sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 ------END CERTIFICATE----- - -# Issuer: CN=America Online Root Certification Authority 2 O=America Online Inc. -# Subject: CN=America Online Root Certification Authority 2 O=America Online Inc. 
-# Label: "America Online Root Certification Authority 2" -# Serial: 1 -# MD5 Fingerprint: d6:ed:3c:ca:e2:66:0f:af:10:43:0d:77:9b:04:09:bf -# SHA1 Fingerprint: 85:b5:ff:67:9b:0c:79:96:1f:c8:6e:44:22:00:46:13:db:17:92:84 -# SHA256 Fingerprint: 7d:3b:46:5a:60:14:e5:26:c0:af:fc:ee:21:27:d2:31:17:27:ad:81:1c:26:84:2d:00:6a:f3:73:06:cc:80:bd ------BEGIN CERTIFICATE----- -MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc -MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP -bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2 -MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft -ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg -Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP -ADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC -206B89enfHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFci -KtZHgVdEglZTvYYUAQv8f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2 -JxhP7JsowtS013wMPgwr38oE18aO6lhOqKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9 -BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JNRvCAOVIyD+OEsnpD8l7e -Xz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0gBe4lL8B -PeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67 -Xnfn6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEq -Z8A9W6Wa6897GqidFEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZ -o2C7HK2JNDJiuEMhBnIMoVxtRsX6Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3 -+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnjB453cMor9H124HhnAgMBAAGj -YzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3OpaaEg5+31IqEj -FNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE -AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmn -xPBUlgtk87FYT15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2 -LHo1YGwRgJfMqZJS5ivmae2p+DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzccc -obGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXgJXUjhx5c3LqdsKyzadsXg8n33gy8 -CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//ZoyzH1kUQ7rVyZ2OuMe -IjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgOZtMA -DjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2F -AjgQ5ANh1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUX -Om/9riW99XJZZLF0KjhfGEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPb -AZO1XB4Y3WRayhgoPmMEEf0cjQAPuDffZ4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQl -Zvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuPcX/9XhmgD0uRuMRUvAaw -RY8mkaKO/qk= ------END CERTIFICATE----- - -# Issuer: CN=AAA Certificate Services O=Comodo CA Limited -# Subject: CN=AAA Certificate Services O=Comodo CA Limited -# Label: "Comodo AAA Services root" -# Serial: 1 -# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 -# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 -# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 ------BEGIN CERTIFICATE----- -MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb -MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow -GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj -YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL -MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE -BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM -GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua -BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe -3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 
-YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR -rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm -ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU -oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF -MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v -QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t -b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF -AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q -GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz -Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 -G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi -l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 -smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== ------END CERTIFICATE----- - -# Issuer: CN=Secure Certificate Services O=Comodo CA Limited -# Subject: CN=Secure Certificate Services O=Comodo CA Limited -# Label: "Comodo Secure Services root" -# Serial: 1 -# MD5 Fingerprint: d3:d9:bd:ae:9f:ac:67:24:b3:c8:1b:52:e1:b9:a9:bd -# SHA1 Fingerprint: 4a:65:d5:f4:1d:ef:39:b8:b8:90:4a:4a:d3:64:81:33:cf:c7:a1:d1 -# SHA256 Fingerprint: bd:81:ce:3b:4f:65:91:d1:1a:67:b5:fc:7a:47:fd:ef:25:52:1b:f9:aa:4e:18:b9:e3:df:2e:34:a7:80:3b:e8 ------BEGIN CERTIFICATE----- -MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEb -MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow -GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRp -ZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVow -fjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G -A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAiBgNV -BAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPM -cm3ye5drswfxdySRXyWP9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3S -HpR7LZQdqnXXs5jLrLxkU0C8j6ysNstcrbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996 -CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rCoznl2yY4rYsK7hljxxwk -3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3Vp6ea5EQz -6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNV -HQ4EFgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud -EwEB/wQFMAMBAf8wgYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2Rv -Y2EuY29tL1NlY3VyZUNlcnRpZmljYXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRw -Oi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmww -DQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm4J4oqF7Tt/Q0 -5qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj -Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtI -gKvcnDe4IRRLDXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJ -aD61JlfutuC23bkpgHl9j6PwpCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDl -izeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1HRR3B7Hzs/Sk= ------END CERTIFICATE----- - -# Issuer: CN=Trusted Certificate Services O=Comodo CA Limited -# Subject: CN=Trusted Certificate Services O=Comodo CA Limited -# Label: "Comodo Trusted Services root" -# Serial: 1 -# MD5 Fingerprint: 91:1b:3f:6e:cd:9e:ab:ee:07:fe:1f:71:d2:b3:61:27 -# SHA1 Fingerprint: e1:9f:e3:0e:8b:84:60:9e:80:9b:17:0d:72:a8:c5:ba:6e:14:09:bd -# SHA256 Fingerprint: 3f:06:e5:56:81:d4:96:f5:be:16:9e:b5:38:9f:9f:2b:8f:f6:1e:17:08:df:68:81:72:48:49:cd:5d:27:cb:69 ------BEGIN CERTIFICATE----- -MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb -MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow -GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0 
-aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla -MH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO -BgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD -VQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW -fnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt -TGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL -fhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW -1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7 -kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G -A1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD -VR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v -ZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo -dHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu -Y3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/ -HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 -pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS -jBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+ -xqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn -dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi ------END CERTIFICATE----- - -# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com -# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com -# Label: "UTN DATACorp SGC Root CA" -# Serial: 91374294542884689855167577680241077609 -# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06 -# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4 -# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48 ------BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug -Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI ------END CERTIFICATE----- - -# Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com 
-# Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com -# Label: "UTN USERFirst Hardware Root CA" -# Serial: 91374294542884704022267039221184531197 -# MD5 Fingerprint: 4c:56:41:e5:0d:bb:2b:e8:ca:a3:ed:18:08:ad:43:39 -# SHA1 Fingerprint: 04:83:ed:33:99:ac:36:08:05:87:22:ed:bc:5e:46:00:e3:be:f9:d7 -# SHA256 Fingerprint: 6e:a5:47:41:d0:04:66:7e:ed:1b:48:16:63:4a:a3:a7:9e:6e:4b:96:95:0f:82:79:da:fc:8d:9b:d8:81:21:37 ------BEGIN CERTIFICATE----- -MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB -lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug -Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt -SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG -A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe -MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v -d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh -cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn -0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ -M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a -MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd -oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI -DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy -oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD -VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 -dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy -bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF -BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM -//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli -CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE -CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t -3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS -KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== ------END CERTIFICATE----- - -# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com -# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com -# Label: "XRamp Global CA Root" -# Serial: 107108908803651509692980124233745014957 -# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 -# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 -# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 ------BEGIN CERTIFICATE----- -MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB -gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk -MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY -UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx -NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 -dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy -dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB -dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 -38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP -KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q -DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 -qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa -JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi 
-PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P -BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs -jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 -eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD -ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR -vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt -qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa -IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy -i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ -O+7ETPTsJ3xCwnR8gooJybQDJbw= ------END CERTIFICATE----- - -# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority -# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority -# Label: "Go Daddy Class 2 CA" -# Serial: 0 -# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 -# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 -# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 ------BEGIN CERTIFICATE----- -MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh -MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE -YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 -MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo -ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg -MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN -ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA -PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w -wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi -EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY -avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ -YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE -sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h -/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 -IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj -YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD -ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy -OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P -TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ -HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER -dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf -ReYNnyicsbkqWletNw+vHX/bvZ8= ------END CERTIFICATE----- - -# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority -# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority -# Label: "Starfield Class 2 CA" -# Serial: 0 -# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 -# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a -# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl -MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp -U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw -NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE -ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp -ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 -DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf -8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN -+lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 -X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa -K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA -1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G -A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR -zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 -YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD -bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w -DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 -L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D -eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl -xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp -VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY -WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= ------END CERTIFICATE----- - -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 1 -# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16 -# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f -# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea ------BEGIN CERTIFICATE----- -MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE -FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j -ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js -LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM -BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 -Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy -dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh -cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh -YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg -dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp -bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ -YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT -TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ -9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 -jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW -FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz -ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 -ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L -EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu -L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq -yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC -O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V -um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh -NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Assured ID Root CA" -# Serial: 17154717934120587862167794914071425081 -# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 -# SHA1 Fingerprint: 
05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 -# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c ------BEGIN CERTIFICATE----- -MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv -b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG -EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl -cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c -JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP -mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ -wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 -VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ -AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB -AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW -BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun -pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC -dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf -fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm -NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx -H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe -+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert Global Root CA" -# Serial: 10944719598952040374951832963794454346 -# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e -# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 -# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 ------BEGIN CERTIFICATE----- -MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD -QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT -MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j -b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB -CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 -nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt -43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P -T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 -gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO -BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR -TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw -DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr -hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg -06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF -PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls -YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk -CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= ------END CERTIFICATE----- - -# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com -# Subject: CN=DigiCert High Assurance EV Root CA 
O=DigiCert Inc OU=www.digicert.com -# Label: "DigiCert High Assurance EV Root CA" -# Serial: 3553400076410547919724730734378100087 -# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a -# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 -# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf ------BEGIN CERTIFICATE----- -MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j -ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL -MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 -LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug -RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm -+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW -PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM -xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB -Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 -hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg -EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF -MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA -FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec -nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z -eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF -hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 -Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe -vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep -+OkuE6N36B9K ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. -# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. 
-# Label: "GeoTrust Primary Certification Authority" -# Serial: 32798226551256963324313806436981982369 -# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf -# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96 -# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c ------BEGIN CERTIFICATE----- -MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY -MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo -R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx -MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK -Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 -AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA -ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 -7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W -kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI -mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G -A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ -KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 -6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl -4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K -oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj -UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU -AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= ------END CERTIFICATE----- - -# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only -# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. 
- For authorized use only -# Label: "thawte Primary Root CA" -# Serial: 69529181992039203566298953787712940909 -# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12 -# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81 -# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB -qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf -Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw -MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV -BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw -NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j -LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG -A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl -IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs -W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta -3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk -6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 -Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J -NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA -MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP -r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU -DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz -YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX -xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 -/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ -LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 -jVaMaA== ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. 
- For authorized use only -# Label: "VeriSign Class 3 Public Primary Certification Authority - G5" -# Serial: 33037644167568058970164719475676101450 -# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c -# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5 -# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df ------BEGIN CERTIFICATE----- -MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB -yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL -ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp -U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW -ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 -aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL -MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW -ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln -biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp -U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y -aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 -nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex -t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz -SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG -BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ -rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ -NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E -BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH -BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy -aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv -MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE -p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y -5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK -WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ -4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N -hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq ------END CERTIFICATE----- - -# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited -# Subject: CN=COMODO Certification Authority O=COMODO CA Limited -# Label: "COMODO Certification Authority" -# Serial: 104350513648249232941998508985834464573 -# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 -# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b -# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 ------BEGIN CERTIFICATE----- -MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB -gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G -A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV -BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw -MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl -YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P -RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 -aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 -UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI -2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 -Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp -+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ 
-DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O -nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW -/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g -PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u -QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY -SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv -IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ -RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 -zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd -BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB -ZQ== ------END CERTIFICATE----- - -# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. -# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. -# Label: "Network Solutions Certificate Authority" -# Serial: 116697915152937497490437556386812487904 -# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e -# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce -# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c ------BEGIN CERTIFICATE----- -MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi -MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu -MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp -dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV -UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO -ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz -c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP -OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl -mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF -BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 -qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw -gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu -bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp -dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 -6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ -h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH -/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv -wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN -pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey ------END CERTIFICATE----- - -# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited -# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited -# Label: "COMODO ECC Certification Authority" -# Serial: 41578283867086692638256921589707938090 -# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 -# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 -# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 ------BEGIN CERTIFICATE----- -MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL -MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE -BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT -IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw -MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy 
-ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N -T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv -biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR -FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J -cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW -BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ -BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm -fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv -GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= ------END CERTIFICATE----- - -# Issuer: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA -# Subject: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA -# Label: "TC TrustCenter Class 2 CA II" -# Serial: 941389028203453866782103406992443 -# MD5 Fingerprint: ce:78:33:5c:59:78:01:6e:18:ea:b9:36:a0:b9:2e:23 -# SHA1 Fingerprint: ae:50:83:ed:7c:f4:5c:bc:8f:61:c6:21:fe:68:5d:79:42:21:15:6e -# SHA256 Fingerprint: e6:b8:f8:76:64:85:f8:07:ae:7f:8d:ac:16:70:46:1f:07:c0:a1:3e:ef:3a:1f:f7:17:53:8d:7a:ba:d3:91:b4 ------BEGIN CERTIFICATE----- -MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL -MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV -BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 -Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1 -OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i -SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc -VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf -tMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg -uNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J -XjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK -8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99 -5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3 -kUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy -dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6 -Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz -JTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 -Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u -TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS -GNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt -ZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8 -au0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV -hgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI -dUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ== ------END CERTIFICATE----- - -# Issuer: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA -# Subject: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA -# Label: "TC TrustCenter Class 3 CA II" -# Serial: 1506523511417715638772220530020799 -# MD5 Fingerprint: 56:5f:aa:80:61:12:17:f6:67:21:e6:2b:6d:61:56:8e -# SHA1 Fingerprint: 80:25:ef:f4:6e:70:c8:d4:72:24:65:84:fe:40:3b:8a:8d:6a:db:f5 -# SHA256 Fingerprint: 8d:a0:84:fc:f9:9c:e0:77:22:f8:9b:32:05:93:98:06:fa:5c:b8:11:e1:c8:13:f6:a1:08:c7:d3:36:b3:40:8e ------BEGIN CERTIFICATE----- -MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjEL -MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV 
-BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 -Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYwMTEyMTQ0MTU3WhcNMjUxMjMxMjI1 -OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i -SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UEAxMc -VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJW -Ht4bNwcwIi9v8Qbxq63WyKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+Q -Vl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo6SI7dYnWRBpl8huXJh0obazovVkdKyT2 -1oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZuV3bOx4a+9P/FRQI2Alq -ukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk2ZyqBwi1 -Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NX -XAek0CSnwPIA1DCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy -dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6 -Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz -JTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 -Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u -TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlN -irTzwppVMXzEO2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8 -TtXqluJucsG7Kv5sbviRmEb8yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6 -g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9IJqDnxrcOfHFcqMRA/07QlIp2+gB -95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal092Y+tTmBvTwtiBj -S+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc5A== ------END CERTIFICATE----- - -# Issuer: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA -# Subject: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA -# Label: "TC TrustCenter Universal CA I" -# Serial: 601024842042189035295619584734726 -# MD5 Fingerprint: 45:e1:a5:72:c5:a9:36:64:40:9e:f5:e4:58:84:67:8c -# SHA1 Fingerprint: 6b:2f:34:ad:89:58:be:62:fd:b0:6b:5c:ce:bb:9d:d9:4f:4e:39:f3 -# SHA256 Fingerprint: eb:f3:c0:2a:87:89:b1:fb:7d:51:19:95:d6:63:b7:29:06:d9:13:ce:0d:5e:10:56:8a:8a:77:e2:58:61:67:e7 ------BEGIN CERTIFICATE----- -MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL -MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV -BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1 -c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx -MjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg -R21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD -VQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR -JJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T -fCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu -jRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z -wZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ -fezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD -VR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO -BgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G -CSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1 -7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn -8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs -ydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT -ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/ -2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY ------END CERTIFICATE----- - -# Issuer: CN=Cybertrust Global 
Root O=Cybertrust, Inc -# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc -# Label: "Cybertrust Global Root" -# Serial: 4835703278459682877484360 -# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 -# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 -# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 ------BEGIN CERTIFICATE----- -MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG -A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh -bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE -ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS -b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 -7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS -J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y -HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP -t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz -FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY -XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ -MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw -hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js -MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA -A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj -Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx -XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o -omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc -A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW -WL1WMRJOEcgh4LMRkWXbtKaIOM5V ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only -# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. 
- For authorized use only -# Label: "GeoTrust Primary Certification Authority - G3" -# Serial: 28809105769928564313984085209975885599 -# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05 -# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd -# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4 ------BEGIN CERTIFICATE----- -MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB -mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT -MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s -eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv -cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ -BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg -MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 -BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz -+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm -hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn -5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W -JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL -DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC -huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw -HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB -AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB -zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN -kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD -AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH -SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G -spki4cErx5z481+oghLrGREt ------END CERTIFICATE----- - -# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only -# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only -# Label: "thawte Primary Root CA - G2" -# Serial: 71758320672825410020661621085256472406 -# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f -# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12 -# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57 ------BEGIN CERTIFICATE----- -MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL -MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp -IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi -BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw -MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh -d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig -YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v -dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ -BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 -papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K -DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 -KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox -XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== ------END CERTIFICATE----- - -# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. 
- For authorized use only -# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only -# Label: "thawte Primary Root CA - G3" -# Serial: 127614157056681299805556476275995414779 -# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31 -# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2 -# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c ------BEGIN CERTIFICATE----- -MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB -rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf -Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw -MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV -BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa -Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl -LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u -MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl -ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm -gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 -YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf -b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 -9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S -zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk -OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV -HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA -2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW -oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu -t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c -KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM -m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu -MdRAGmI0Nj81Aa6sY6A= ------END CERTIFICATE----- - -# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only -# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. 
- For authorized use only -# Label: "GeoTrust Primary Certification Authority - G2" -# Serial: 80682863203381065782177908751794619243 -# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a -# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0 -# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66 ------BEGIN CERTIFICATE----- -MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL -MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj -KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 -MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 -eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV -BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw -NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV -BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH -MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL -So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal -tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO -BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG -CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT -qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz -rD6ogRLQy7rQkgu2npaqBA+K ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only -# Label: "VeriSign Universal Root Certification Authority" -# Serial: 85209574734084581917763752644031726877 -# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19 -# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54 -# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c ------BEGIN CERTIFICATE----- -MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB -vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL -ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp -U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W -ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe -Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX -MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 -IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y -IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh -bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF -9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH -H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H -LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN -/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT -rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw -WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs -exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud -DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 -sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ 
-seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz -4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ -BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR -lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 -7M2CYfE45k+XmCpajQ== ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only -# Label: "VeriSign Class 3 Public Primary Certification Authority - G4" -# Serial: 63143484348153506665311985501458640051 -# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41 -# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a -# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79 ------BEGIN CERTIFICATE----- -MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL -MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW -ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln -biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp -U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y -aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG -A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp -U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg -SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln -biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 -IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm -GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve -fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ -aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj -aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW -kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC -4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga -FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== ------END CERTIFICATE----- - -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 -# Label: "GlobalSign Root CA - R3" -# Serial: 4835703278459759426209954 -# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 -# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad -# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b ------BEGIN CERTIFICATE----- -MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G -A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp -Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 -MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG -A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 -RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT -gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm -KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd -QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ 
-XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw -DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o -LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU -RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp -jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK -6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX -mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs -Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH -WD9f ------END CERTIFICATE----- - -# Issuer: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA -# Subject: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA -# Label: "TC TrustCenter Universal CA III" -# Serial: 2010889993983507346460533407902964 -# MD5 Fingerprint: 9f:dd:db:ab:ff:8e:ff:45:21:5f:f0:6c:9d:8f:fe:2b -# SHA1 Fingerprint: 96:56:cd:7b:57:96:98:95:d0:e1:41:46:68:06:fb:b8:c6:11:06:87 -# SHA256 Fingerprint: 30:9b:4a:87:f6:ca:56:c9:31:69:aa:a9:9c:6d:98:88:54:d7:89:2b:d5:43:7e:2d:07:b2:9c:be:da:55:d3:5d ------BEGIN CERTIFICATE----- -MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezEL -MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV -BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1 -c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAeFw0wOTA5MDkwODE1MjdaFw0yOTEy -MzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNUQyBUcnVzdENlbnRl -ciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0ExKDAm -BgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF -5+cvAqBNLaT6hdqbJYUtQCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYv -DIRlzg9uwliT6CwLOunBjvvya8o84pxOjuT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8v -zArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+EutCHnNaYlAJ/Uqwa1D7KRT -yGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1M4BDj5yj -dipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBh -MB8GA1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMB -Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI -4jANBgkqhkiG9w0BAQUFAAOCAQEAg8ev6n9NCjw5sWi+e22JLumzCecYV42Fmhfz -dkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+KGwWaODIl0YgoGhnYIg5IFHY -aAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhKBgePxLcHsU0G -DeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV -CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPH -LQNjO9Po5KIqwoIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg== ------END CERTIFICATE----- - -# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. -# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
-# Label: "Go Daddy Root Certificate Authority - G2" -# Serial: 0 -# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 -# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b -# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da ------BEGIN CERTIFICATE----- -MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx -EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT -EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp -ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz -NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH -EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE -AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw -DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD -E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH -/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy -DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh -GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR -tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA -AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE -FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX -WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu -9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr -gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo -2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO -LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI -4uJEvlz36hz1 ------END CERTIFICATE----- - -# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. -# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
-# Label: "Starfield Root Certificate Authority - G2" -# Serial: 0 -# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 -# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e -# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 ------BEGIN CERTIFICATE----- -MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx -EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT -HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs -ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw -MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 -b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj -aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg -nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 -HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N -Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN -dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 -HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO -BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G -CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU -sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 -4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg -8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K -pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 -mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 ------END CERTIFICATE----- - -# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. -# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
-# Label: "Starfield Services Root Certificate Authority - G2" -# Serial: 0 -# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 -# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f -# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 ------BEGIN CERTIFICATE----- -MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx -EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT -HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs -ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 -MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD -VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy -ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy -dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p -OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 -8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K -Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe -hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk -6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw -DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q -AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI -bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB -ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z -qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd -iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn -0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN -sSi6 ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Commercial O=AffirmTrust -# Subject: CN=AffirmTrust Commercial O=AffirmTrust -# Label: "AffirmTrust Commercial" -# Serial: 8608355977964138876 -# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 -# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 -# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 ------BEGIN CERTIFICATE----- -MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE -BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz -dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL -MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp -cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP -Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr -ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL -MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 -yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr -VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ -nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ -KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG -XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj -vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt -Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g -N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC -nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Networking O=AffirmTrust -# Subject: CN=AffirmTrust Networking 
O=AffirmTrust -# Label: "AffirmTrust Networking" -# Serial: 8957382827206547757 -# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f -# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f -# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b ------BEGIN CERTIFICATE----- -MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE -BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz -dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL -MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp -cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y -YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua -kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL -QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp -6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG -yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i -QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ -KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO -tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu -QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ -Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u -olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 -x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Premium O=AffirmTrust -# Subject: CN=AffirmTrust Premium O=AffirmTrust -# Label: "AffirmTrust Premium" -# Serial: 7893706540734352110 -# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 -# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 -# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a ------BEGIN CERTIFICATE----- -MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE -BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz -dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG -A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U -cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf -qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ -JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ -+jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS -s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 -HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 -70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG -V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S -qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S -5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia -C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX -OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE -FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ -BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 -KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg -Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B -8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ -MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc -0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
-u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF -u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH -YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 -GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO -RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e -KeC2uAloGRwYQw== ------END CERTIFICATE----- - -# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust -# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust -# Label: "AffirmTrust Premium ECC" -# Serial: 8401224907861490260 -# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d -# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb -# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 ------BEGIN CERTIFICATE----- -MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC -VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ -cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ -BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt -VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D -0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 -ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G -A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G -A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs -aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I -flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== ------END CERTIFICATE----- - -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 45 -# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16 -# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0 -# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11 ------BEGIN CERTIFICATE----- -MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD -VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul -F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC 
-ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w -ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk -aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 -YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg -c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 -aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 -d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG -CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 -dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF -wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS -Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst -0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc -pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl -CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF -P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK -1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm -KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE -JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ -8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm -fyWl8kgAwKQB2j8= ------END CERTIFICATE----- - -# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd. -# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd. -# Label: "StartCom Certification Authority G2" -# Serial: 59 -# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64 -# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17 -# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95 ------BEGIN CERTIFICATE----- -MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm -aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 -OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG -A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ -JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD -vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo -D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ -Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW -RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK -HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN -nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM -0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i -UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 -Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg -TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE -AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL -BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K -2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX -UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl -6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK -9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ -HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI -wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY -XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l 
-IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo -hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr -so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI ------END CERTIFICATE----- - -# Issuer: O=Digital Signature Trust Co., CN=DST Root CA X3 -# Subject: O=Digital Signature Trust Co., CN=DST Root CA X3 -# Label: "IdenTrust DST Root CA X3" -# Serial: 44AFB080D6A327BA893039862EF8406B -# MD5 Fingerprint: 41:03:52:DC:0F:F7:50:1B:16:F0:02:8E:BA:6F:45:C5 -# SHA1 Fingerprint: DA:C9:02:4F:54:D8:F6:DF:94:93:5F:B1:73:26:38:CA:6A:D7:7C:13 -# SHA256 Fingerprint: 06:87:26:03:31:A7:24:03:D9:09:F1:05:E6:9B:CF:0D:32:E1:BD:24:93:FF:C6:D9:20:6D:11:BC:D6:77:07:39 ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ -MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT -DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow -PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD -Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O -rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq -OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b -xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw -7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD -aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG -SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 -ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr -AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz -R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 -JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo -Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ ------END CERTIFICATE----- - -# Issuer: CN=DigiCert Global Root G2, OU=www.digicert.com, O=DigiCert Inc, C=US -# Subject: CN=DigiCert Global Root G2, OU=www.digicert.com, O=DigiCert Inc, C=US -# Serial: 33af1e6a711a9a0bb2864b11d09fae5 -# MD5 Fingerprint: E4:A6:8A:C8:54:AC:52:42:46:0A:FD:72:48:1B:2A:44 -# SHA1 Fingerprint: DF:3C:24:F9:BF:D6:66:76:1B:26:80:73:FE:06:D1:CC:8D:4F:82:A4 -# SHA256 Fingerprint: CB:3C:CB:B7:60:31:E5:E0:13:8F:8D:D3:9A:23:F9:DE:47:FF:C3:5E:43:C1:14:4C:EA:27:D4:6A:5A:B1:CB:5F ------BEGIN CERTIFICATE----- -MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH -MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT -MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j -b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI -2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx -1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ -q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz -tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ -vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP -BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV -5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY -1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 -NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG -Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 
-8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe -pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl -MrY= ------END CERTIFICATE----- - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/certs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/certs.py deleted file mode 100755 index 59d1ffc..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/certs.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Utilities for certificate management.""" - -import os - -certifi_available = False -certifi_where = None -try: - from certifi import where as certifi_where - certifi_available = True -except ImportError: - pass - -custom_ca_locater_available = False -custom_ca_locater_where = None -try: - from ca_certs_locater import get as custom_ca_locater_where - custom_ca_locater_available = True -except ImportError: - pass - - -BUILTIN_CA_CERTS = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "cacerts.txt" -) - - -def where(): - env = os.environ.get("HTTPLIB2_CA_CERTS") - if env is not None: - if os.path.isfile(env): - return env - else: - raise RuntimeError("Environment variable HTTPLIB2_CA_CERTS not a valid file") - if custom_ca_locater_available: - return custom_ca_locater_where() - if certifi_available: - return certifi_where() - return BUILTIN_CA_CERTS - - -if __name__ == "__main__": - print(where()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/error.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/error.py deleted file mode 100755 index 0e68c12..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/error.py +++ /dev/null @@ -1,48 +0,0 @@ -# All exceptions raised here derive from HttpLib2Error -class HttpLib2Error(Exception): - pass - - -# Some exceptions can be caught and optionally -# be turned back into responses. 
-class HttpLib2ErrorWithResponse(HttpLib2Error): - def __init__(self, desc, response, content): - self.response = response - self.content = content - HttpLib2Error.__init__(self, desc) - - -class RedirectMissingLocation(HttpLib2ErrorWithResponse): - pass - - -class RedirectLimit(HttpLib2ErrorWithResponse): - pass - - -class FailedToDecompressContent(HttpLib2ErrorWithResponse): - pass - - -class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): - pass - - -class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): - pass - - -class MalformedHeader(HttpLib2Error): - pass - - -class RelativeURIError(HttpLib2Error): - pass - - -class ServerNotFoundError(HttpLib2Error): - pass - - -class ProxiesUnavailableError(HttpLib2Error): - pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/iri2uri.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/iri2uri.py deleted file mode 100755 index 86e361e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/iri2uri.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -"""Converts an IRI to a URI.""" - -__author__ = "Joe Gregorio (joe@bitworking.org)" -__copyright__ = "Copyright 2006, Joe Gregorio" -__contributors__ = [] -__version__ = "1.0.0" -__license__ = "MIT" - -import urllib.parse - -# Convert an IRI to a URI following the rules in RFC 3987 -# -# The characters we need to enocde and escape are defined in the spec: -# -# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD -# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF -# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD -# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD -# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD -# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD -# / %xD0000-DFFFD / %xE1000-EFFFD - -escape_range = [ - (0xA0, 0xD7FF), - (0xE000, 0xF8FF), - (0xF900, 0xFDCF), - (0xFDF0, 0xFFEF), - (0x10000, 0x1FFFD), - (0x20000, 0x2FFFD), - (0x30000, 0x3FFFD), - (0x40000, 0x4FFFD), - (0x50000, 0x5FFFD), - (0x60000, 0x6FFFD), - (0x70000, 0x7FFFD), - (0x80000, 0x8FFFD), - (0x90000, 0x9FFFD), - (0xA0000, 0xAFFFD), - (0xB0000, 0xBFFFD), - (0xC0000, 0xCFFFD), - (0xD0000, 0xDFFFD), - (0xE1000, 0xEFFFD), - (0xF0000, 0xFFFFD), - (0x100000, 0x10FFFD), -] - - -def encode(c): - retval = c - i = ord(c) - for low, high in escape_range: - if i < low: - break - if i >= low and i <= high: - retval = "".join(["%%%2X" % o for o in c.encode("utf-8")]) - break - return retval - - -def iri2uri(uri): - """Convert an IRI to a URI. Note that IRIs must be - passed in a unicode strings. That is, do not utf-8 encode - the IRI before passing it into the function.""" - if isinstance(uri, str): - (scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri) - authority = authority.encode("idna").decode("utf-8") - # For each character in 'ucschar' or 'iprivate' - # 1. encode as utf-8 - # 2. 
then %-encode each octet of that utf-8 - uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment)) - uri = "".join([encode(c) for c in uri]) - return uri - - -if __name__ == "__main__": - import unittest - - class Test(unittest.TestCase): - def test_uris(self): - """Test that URIs are invariant under the transformation.""" - invariant = [ - "ftp://ftp.is.co.za/rfc/rfc1808.txt", - "http://www.ietf.org/rfc/rfc2396.txt", - "ldap://[2001:db8::7]/c=GB?objectClass?one", - "mailto:John.Doe@example.com", - "news:comp.infosystems.www.servers.unix", - "tel:+1-816-555-1212", - "telnet://192.0.2.16:80/", - "urn:oasis:names:specification:docbook:dtd:xml:4.1.2", - ] - for uri in invariant: - self.assertEqual(uri, iri2uri(uri)) - - def test_iri(self): - """Test that the right type of escaping is done for each part of the URI.""" - self.assertEqual( - "http://xn--o3h.com/%E2%98%84", - iri2uri("http://\N{COMET}.com/\N{COMET}"), - ) - self.assertEqual( - "http://bitworking.org/?fred=%E2%98%84", - iri2uri("http://bitworking.org/?fred=\N{COMET}"), - ) - self.assertEqual( - "http://bitworking.org/#%E2%98%84", - iri2uri("http://bitworking.org/#\N{COMET}"), - ) - self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}")) - self.assertEqual( - "/fred?bar=%E2%98%9A#%E2%98%84", - iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"), - ) - self.assertEqual( - "/fred?bar=%E2%98%9A#%E2%98%84", - iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")), - ) - self.assertNotEqual( - "/fred?bar=%E2%98%9A#%E2%98%84", - iri2uri( - "/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8") - ), - ) - - unittest.main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/socks.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/socks.py deleted file mode 100755 index cc68e63..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2/socks.py +++ /dev/null @@ -1,518 +0,0 @@ -"""SocksiPy - Python SOCKS module. - -Version 1.00 - -Copyright 2006 Dan-Haim. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -3. Neither the name of Dan Haim nor the names of his contributors may be used - to endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA -OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT -OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE. 
- -This module provides a standard socket-like interface for Python -for tunneling connections through SOCKS proxies. - -Minor modifications made by Christopher Gilbert (http://motomastyle.com/) for -use in PyLoris (http://pyloris.sourceforge.net/). - -Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/) -mainly to merge bug fixes found in Sourceforge. -""" - -import base64 -import socket -import struct -import sys - -if getattr(socket, "socket", None) is None: - raise ImportError("socket.socket missing, proxy support unusable") - -PROXY_TYPE_SOCKS4 = 1 -PROXY_TYPE_SOCKS5 = 2 -PROXY_TYPE_HTTP = 3 -PROXY_TYPE_HTTP_NO_TUNNEL = 4 - -_defaultproxy = None -_orgsocket = socket.socket - - -class ProxyError(Exception): - pass - - -class GeneralProxyError(ProxyError): - pass - - -class Socks5AuthError(ProxyError): - pass - - -class Socks5Error(ProxyError): - pass - - -class Socks4Error(ProxyError): - pass - - -class HTTPError(ProxyError): - pass - - -_generalerrors = ( - "success", - "invalid data", - "not connected", - "not available", - "bad proxy type", - "bad input", -) - -_socks5errors = ( - "succeeded", - "general SOCKS server failure", - "connection not allowed by ruleset", - "Network unreachable", - "Host unreachable", - "Connection refused", - "TTL expired", - "Command not supported", - "Address type not supported", - "Unknown error", -) - -_socks5autherrors = ( - "succeeded", - "authentication is required", - "all offered authentication methods were rejected", - "unknown username or invalid password", - "unknown error", -) - -_socks4errors = ( - "request granted", - "request rejected or failed", - "request rejected because SOCKS server cannot connect to identd on the client", - "request rejected because the client program and identd report different " - "user-ids", - "unknown error", -) - - -def setdefaultproxy( - proxytype=None, addr=None, port=None, rdns=True, username=None, password=None -): - """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) - Sets a default proxy which all further socksocket objects will use, - unless explicitly changed. - """ - global _defaultproxy - _defaultproxy = (proxytype, addr, port, rdns, username, password) - - -def wrapmodule(module): - """wrapmodule(module) - - Attempts to replace a module's socket library with a SOCKS socket. Must set - a default proxy using setdefaultproxy(...) first. - This will only work on modules that import socket directly into the - namespace; - most of the Python Standard Library falls into this category. - """ - if _defaultproxy != None: - module.socket.socket = socksocket - else: - raise GeneralProxyError((4, "no proxy specified")) - - -class socksocket(socket.socket): - """socksocket([family[, type[, proto]]]) -> socket object - Open a SOCKS enabled socket. The parameters are the same as - those of the standard socket init. In order for SOCKS to work, - you must specify family=AF_INET, type=SOCK_STREAM and proto=0. - """ - - def __init__( - self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None - ): - _orgsocket.__init__(self, family, type, proto, _sock) - if _defaultproxy != None: - self.__proxy = _defaultproxy - else: - self.__proxy = (None, None, None, None, None, None) - self.__proxysockname = None - self.__proxypeername = None - self.__httptunnel = True - - def __recvall(self, count): - """__recvall(count) -> data - Receive EXACTLY the number of bytes requested from the socket. - Blocks until the required number of bytes have been received. 
- """ - data = self.recv(count) - while len(data) < count: - d = self.recv(count - len(data)) - if not d: - raise GeneralProxyError((0, "connection closed unexpectedly")) - data = data + d - return data - - def sendall(self, content, *args): - """ override socket.socket.sendall method to rewrite the header - for non-tunneling proxies if needed - """ - if not self.__httptunnel: - content = self.__rewriteproxy(content) - return super(socksocket, self).sendall(content, *args) - - def __rewriteproxy(self, header): - """ rewrite HTTP request headers to support non-tunneling proxies - (i.e. those which do not support the CONNECT method). - This only works for HTTP (not HTTPS) since HTTPS requires tunneling. - """ - host, endpt = None, None - hdrs = header.split("\r\n") - for hdr in hdrs: - if hdr.lower().startswith("host:"): - host = hdr - elif hdr.lower().startswith("get") or hdr.lower().startswith("post"): - endpt = hdr - if host and endpt: - hdrs.remove(host) - hdrs.remove(endpt) - host = host.split(" ")[1] - endpt = endpt.split(" ") - if self.__proxy[4] != None and self.__proxy[5] != None: - hdrs.insert(0, self.__getauthheader()) - hdrs.insert(0, "Host: %s" % host) - hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2])) - return "\r\n".join(hdrs) - - def __getauthheader(self): - auth = self.__proxy[4] + b":" + self.__proxy[5] - return "Proxy-Authorization: Basic " + base64.b64encode(auth).decode() - - def setproxy( - self, - proxytype=None, - addr=None, - port=None, - rdns=True, - username=None, - password=None, - headers=None, - ): - """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) - - Sets the proxy to be used. - proxytype - The type of the proxy to be used. Three types - are supported: PROXY_TYPE_SOCKS4 (including socks4a), - PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP - addr - The address of the server (IP or DNS). - port - The port of the server. Defaults to 1080 for SOCKS - servers and 8080 for HTTP proxy servers. - rdns - Should DNS queries be preformed on the remote side - (rather than the local side). The default is True. - Note: This has no effect with SOCKS4 servers. - username - Username to authenticate with to the server. - The default is no authentication. - password - Password to authenticate with to the server. - Only relevant when username is also provided. - headers - Additional or modified headers for the proxy connect - request. - """ - self.__proxy = ( - proxytype, - addr, - port, - rdns, - username.encode() if username else None, - password.encode() if password else None, - headers, - ) - - def __negotiatesocks5(self, destaddr, destport): - """__negotiatesocks5(self,destaddr,destport) - Negotiates a connection through a SOCKS5 server. - """ - # First we'll send the authentication packages we support. - if (self.__proxy[4] != None) and (self.__proxy[5] != None): - # The username/password details were supplied to the - # setproxy method so we support the USERNAME/PASSWORD - # authentication (in addition to the standard none). - self.sendall(struct.pack("BBBB", 0x05, 0x02, 0x00, 0x02)) - else: - # No username/password were entered, therefore we - # only support connections with no authentication. 
- self.sendall(struct.pack("BBB", 0x05, 0x01, 0x00)) - # We'll receive the server's response to determine which - # method was selected - chosenauth = self.__recvall(2) - if chosenauth[0:1] != chr(0x05).encode(): - self.close() - raise GeneralProxyError((1, _generalerrors[1])) - # Check the chosen authentication method - if chosenauth[1:2] == chr(0x00).encode(): - # No authentication is required - pass - elif chosenauth[1:2] == chr(0x02).encode(): - # Okay, we need to perform a basic username/password - # authentication. - packet = bytearray() - packet.append(0x01) - packet.append(len(self.__proxy[4])) - packet.extend(self.__proxy[4]) - packet.append(len(self.__proxy[5])) - packet.extend(self.__proxy[5]) - self.sendall(packet) - authstat = self.__recvall(2) - if authstat[0:1] != chr(0x01).encode(): - # Bad response - self.close() - raise GeneralProxyError((1, _generalerrors[1])) - if authstat[1:2] != chr(0x00).encode(): - # Authentication failed - self.close() - raise Socks5AuthError((3, _socks5autherrors[3])) - # Authentication succeeded - else: - # Reaching here is always bad - self.close() - if chosenauth[1] == chr(0xFF).encode(): - raise Socks5AuthError((2, _socks5autherrors[2])) - else: - raise GeneralProxyError((1, _generalerrors[1])) - # Now we can request the actual connection - req = struct.pack("BBB", 0x05, 0x01, 0x00) - # If the given destination address is an IP address, we'll - # use the IPv4 address request even if remote resolving was specified. - try: - ipaddr = socket.inet_aton(destaddr) - req = req + chr(0x01).encode() + ipaddr - except socket.error: - # Well it's not an IP number, so it's probably a DNS name. - if self.__proxy[3]: - # Resolve remotely - ipaddr = None - req = ( - req - + chr(0x03).encode() - + chr(len(destaddr)).encode() - + destaddr.encode() - ) - else: - # Resolve locally - ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) - req = req + chr(0x01).encode() + ipaddr - req = req + struct.pack(">H", destport) - self.sendall(req) - # Get the response - resp = self.__recvall(4) - if resp[0:1] != chr(0x05).encode(): - self.close() - raise GeneralProxyError((1, _generalerrors[1])) - elif resp[1:2] != chr(0x00).encode(): - # Connection failed - self.close() - if ord(resp[1:2]) <= 8: - raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])])) - else: - raise Socks5Error((9, _socks5errors[9])) - # Get the bound address/port - elif resp[3:4] == chr(0x01).encode(): - boundaddr = self.__recvall(4) - elif resp[3:4] == chr(0x03).encode(): - resp = resp + self.recv(1) - boundaddr = self.__recvall(ord(resp[4:5])) - else: - self.close() - raise GeneralProxyError((1, _generalerrors[1])) - boundport = struct.unpack(">H", self.__recvall(2))[0] - self.__proxysockname = (boundaddr, boundport) - if ipaddr != None: - self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) - else: - self.__proxypeername = (destaddr, destport) - - def getproxysockname(self): - """getsockname() -> address info - Returns the bound IP address and port number at the proxy. - """ - return self.__proxysockname - - def getproxypeername(self): - """getproxypeername() -> address info - Returns the IP and port number of the proxy. 
- """ - return _orgsocket.getpeername(self) - - def getpeername(self): - """getpeername() -> address info - Returns the IP address and port number of the destination - machine (note: getproxypeername returns the proxy) - """ - return self.__proxypeername - - def __negotiatesocks4(self, destaddr, destport): - """__negotiatesocks4(self,destaddr,destport) - Negotiates a connection through a SOCKS4 server. - """ - # Check if the destination address provided is an IP address - rmtrslv = False - try: - ipaddr = socket.inet_aton(destaddr) - except socket.error: - # It's a DNS name. Check where it should be resolved. - if self.__proxy[3]: - ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01) - rmtrslv = True - else: - ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) - # Construct the request packet - req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr - # The username parameter is considered userid for SOCKS4 - if self.__proxy[4] != None: - req = req + self.__proxy[4] - req = req + chr(0x00).encode() - # DNS name if remote resolving is required - # NOTE: This is actually an extension to the SOCKS4 protocol - # called SOCKS4A and may not be supported in all cases. - if rmtrslv: - req = req + destaddr + chr(0x00).encode() - self.sendall(req) - # Get the response from the server - resp = self.__recvall(8) - if resp[0:1] != chr(0x00).encode(): - # Bad data - self.close() - raise GeneralProxyError((1, _generalerrors[1])) - if resp[1:2] != chr(0x5A).encode(): - # Server returned an error - self.close() - if ord(resp[1:2]) in (91, 92, 93): - self.close() - raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90])) - else: - raise Socks4Error((94, _socks4errors[4])) - # Get the bound address/port - self.__proxysockname = ( - socket.inet_ntoa(resp[4:]), - struct.unpack(">H", resp[2:4])[0], - ) - if rmtrslv != None: - self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) - else: - self.__proxypeername = (destaddr, destport) - - def __negotiatehttp(self, destaddr, destport): - """__negotiatehttp(self,destaddr,destport) - Negotiates a connection through an HTTP server. 
- """ - # If we need to resolve locally, we do this now - if not self.__proxy[3]: - addr = socket.gethostbyname(destaddr) - else: - addr = destaddr - headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"] - wrote_host_header = False - wrote_auth_header = False - if self.__proxy[6] != None: - for key, val in self.__proxy[6].iteritems(): - headers += [key, ": ", val, "\r\n"] - wrote_host_header = key.lower() == "host" - wrote_auth_header = key.lower() == "proxy-authorization" - if not wrote_host_header: - headers += ["Host: ", destaddr, "\r\n"] - if not wrote_auth_header: - if self.__proxy[4] != None and self.__proxy[5] != None: - headers += [self.__getauthheader(), "\r\n"] - headers.append("\r\n") - self.sendall("".join(headers).encode()) - # We read the response until we get the string "\r\n\r\n" - resp = self.recv(1) - while resp.find("\r\n\r\n".encode()) == -1: - resp = resp + self.recv(1) - # We just need the first line to check if the connection - # was successful - statusline = resp.splitlines()[0].split(" ".encode(), 2) - if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()): - self.close() - raise GeneralProxyError((1, _generalerrors[1])) - try: - statuscode = int(statusline[1]) - except ValueError: - self.close() - raise GeneralProxyError((1, _generalerrors[1])) - if statuscode != 200: - self.close() - raise HTTPError((statuscode, statusline[2])) - self.__proxysockname = ("0.0.0.0", 0) - self.__proxypeername = (addr, destport) - - def connect(self, destpair): - """connect(self, despair) - Connects to the specified destination through a proxy. - destpar - A tuple of the IP/DNS address and the port number. - (identical to socket's connect). - To select the proxy server use setproxy(). - """ - # Do a minimal input check first - if ( - (not type(destpair) in (list, tuple)) - or (len(destpair) < 2) - or (not isinstance(destpair[0], (str, bytes))) - or (type(destpair[1]) != int) - ): - raise GeneralProxyError((5, _generalerrors[5])) - if self.__proxy[0] == PROXY_TYPE_SOCKS5: - if self.__proxy[2] != None: - portnum = self.__proxy[2] - else: - portnum = 1080 - _orgsocket.connect(self, (self.__proxy[1], portnum)) - self.__negotiatesocks5(destpair[0], destpair[1]) - elif self.__proxy[0] == PROXY_TYPE_SOCKS4: - if self.__proxy[2] != None: - portnum = self.__proxy[2] - else: - portnum = 1080 - _orgsocket.connect(self, (self.__proxy[1], portnum)) - self.__negotiatesocks4(destpair[0], destpair[1]) - elif self.__proxy[0] == PROXY_TYPE_HTTP: - if self.__proxy[2] != None: - portnum = self.__proxy[2] - else: - portnum = 8080 - _orgsocket.connect(self, (self.__proxy[1], portnum)) - self.__negotiatehttp(destpair[0], destpair[1]) - elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL: - if self.__proxy[2] != None: - portnum = self.__proxy[2] - else: - portnum = 8080 - _orgsocket.connect(self, (self.__proxy[1], portnum)) - if destpair[1] == 443: - self.__negotiatehttp(destpair[0], destpair[1]) - else: - self.__httptunnel = False - elif self.__proxy[0] == None: - _orgsocket.connect(self, (destpair[0], destpair[1])) - else: - raise GeneralProxyError((4, _generalerrors[4])) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/RECORD deleted file mode 100644 index 79e2d8e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/RECORD +++ /dev/null @@ -1,15 +0,0 @@ 
-idna-3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -idna-3.3.dist-info/LICENSE.md,sha256=otbk2UC9JNvnuWRc3hmpeSzFHbeuDVrNMBrIYMqj6DY,1523 -idna-3.3.dist-info/METADATA,sha256=BdqiAf8ou4x1nzIHp2_sDfXWjl7BrSUGpOeVzbYHQuQ,9765 -idna-3.3.dist-info/RECORD,, -idna-3.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 -idna-3.3.dist-info/top_level.txt,sha256=jSag9sEDqvSPftxOQy-ABfGV_RSy7oFh4zZJpODV8k0,5 -idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849 -idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374 -idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321 -idna/core.py,sha256=RFIkY-HhFZaDoBEFjGwyGd_vWI04uOAQjnzueMWqwOU,12795 -idna/idnadata.py,sha256=fzMzkCea2xieVxcrjngJ-2pLsKQNejPCZFlBajIuQdw,44025 -idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881 -idna/package_data.py,sha256=szxQhV0ZD0nKJ84Kuobw3l8q4_KeCyXjFRdpwIpKZmw,21 -idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -idna/uts46data.py,sha256=o-D7V-a0fOLZNd7tvxof6MYfUd0TBZzE2bLR5XO67xU,204400 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/top_level.txt deleted file mode 100644 index c40472e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -idna diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/LICENSE.md b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/LICENSE.md similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/LICENSE.md rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/LICENSE.md diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/METADATA similarity index 52% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/METADATA index 6446805..07f6193 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/METADATA @@ -1,12 +1,10 @@ Metadata-Version: 2.1 Name: idna -Version: 3.3 +Version: 3.4 Summary: Internationalized Domain Names in Applications (IDNA) -Home-page: https://github.com/kjd/idna -Author: Kim Davies -Author-email: kim@cynosure.com.au -License: BSD-3-Clause -Platform: UNKNOWN +Author-email: Kim Davies +Requires-Python: >=3.5 +Description-Content-Type: text/x-rst Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators @@ 
-21,28 +19,32 @@ Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Internet :: Name Service (DNS) Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Utilities -Requires-Python: >=3.5 -License-File: LICENSE.md +Project-URL: Changelog, https://github.com/kjd/idna/blob/master/HISTORY.rst +Project-URL: Issue tracker, https://github.com/kjd/idna/issues +Project-URL: Source, https://github.com/kjd/idna Internationalized Domain Names in Applications (IDNA) ===================================================== -Support for the Internationalised Domain Names in Applications -(IDNA) protocol as specified in `RFC 5891 `_. -This is the latest version of the protocol and is sometimes referred to as -“IDNA 2008”. +Support for the Internationalized Domain Names in +Applications (IDNA) protocol as specified in `RFC 5891 +`_. This is the latest version of +the protocol and is sometimes referred to as “IDNA 2008”. -This library also provides support for Unicode Technical Standard 46, -`Unicode IDNA Compatibility Processing `_. +This library also provides support for Unicode Technical +Standard 46, `Unicode IDNA Compatibility Processing +`_. -This acts as a suitable replacement for the “encodings.idna” module that -comes with the Python standard library, but which only supports the -older superseded IDNA specification (`RFC 3490 `_). +This acts as a suitable replacement for the “encodings.idna” +module that comes with the Python standard library, but which +only supports the older superseded IDNA specification (`RFC 3490 +`_). Basic functions are simply executed: @@ -58,24 +60,19 @@ Basic functions are simply executed: Installation ------------ -To install this library, you can use pip: - -.. code-block:: bash - - $ pip install idna - -Alternatively, you can install the package using the bundled setup script: +This package is available for installation from PyPI: .. code-block:: bash - $ python setup.py install + $ python3 -m pip install idna Usage ----- -For typical usage, the ``encode`` and ``decode`` functions will take a domain -name argument and perform a conversion to A-labels or U-labels respectively. +For typical usage, the ``encode`` and ``decode`` functions will take a +domain name argument and perform a conversion to A-labels or U-labels +respectively. .. code-block:: pycon @@ -96,8 +93,8 @@ You may use the codec encoding and decoding methods using the >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna')) домен.испытание -Conversions can be applied at a per-label basis using the ``ulabel`` or ``alabel`` -functions if necessary: +Conversions can be applied at a per-label basis using the ``ulabel`` or +``alabel`` functions if necessary: .. code-block:: pycon @@ -107,20 +104,22 @@ functions if necessary: Compatibility Mapping (UTS #46) +++++++++++++++++++++++++++++++ -As described in `RFC 5895 `_, the IDNA -specification does not normalize input from different potential ways a user -may input a domain name. This functionality, known as a “mapping”, is -considered by the specification to be a local user-interface issue distinct -from IDNA conversion functionality. 
+As described in `RFC 5895 `_, the +IDNA specification does not normalize input from different potential +ways a user may input a domain name. This functionality, known as +a “mapping”, is considered by the specification to be a local +user-interface issue distinct from IDNA conversion functionality. -This library provides one such mapping, that was developed by the Unicode -Consortium. Known as `Unicode IDNA Compatibility Processing `_, -it provides for both a regular mapping for typical applications, as well as -a transitional mapping to help migrate from older IDNA 2003 applications. +This library provides one such mapping, that was developed by the +Unicode Consortium. Known as `Unicode IDNA Compatibility Processing +`_, it provides for both a regular +mapping for typical applications, as well as a transitional mapping to +help migrate from older IDNA 2003 applications. -For example, “Königsgäßchen” is not a permissible label as *LATIN CAPITAL -LETTER K* is not allowed (nor are capital letters in general). UTS 46 will -convert this into lower case prior to applying the IDNA conversion. +For example, “Königsgäßchen” is not a permissible label as *LATIN +CAPITAL LETTER K* is not allowed (nor are capital letters in general). +UTS 46 will convert this into lower case prior to applying the IDNA +conversion. .. code-block:: pycon @@ -133,36 +132,38 @@ convert this into lower case prior to applying the IDNA conversion. >>> print(idna.decode('xn--knigsgchen-b4a3dun')) königsgäßchen -Transitional processing provides conversions to help transition from the older -2003 standard to the current standard. For example, in the original IDNA -specification, the *LATIN SMALL LETTER SHARP S* (ß) was converted into two -*LATIN SMALL LETTER S* (ss), whereas in the current IDNA specification this -conversion is not performed. +Transitional processing provides conversions to help transition from +the older 2003 standard to the current standard. For example, in the +original IDNA specification, the *LATIN SMALL LETTER SHARP S* (ß) was +converted into two *LATIN SMALL LETTER S* (ss), whereas in the current +IDNA specification this conversion is not performed. .. code-block:: pycon >>> idna.encode('Königsgäßchen', uts46=True, transitional=True) 'xn--knigsgsschen-lcb0w' -Implementors should use transitional processing with caution, only in rare -cases where conversion from legacy labels to current labels must be performed -(i.e. IDNA implementations that pre-date 2008). For typical applications -that just need to convert labels, transitional processing is unlikely to be -beneficial and could produce unexpected incompatible results. +Implementors should use transitional processing with caution, only in +rare cases where conversion from legacy labels to current labels must be +performed (i.e. IDNA implementations that pre-date 2008). For typical +applications that just need to convert labels, transitional processing +is unlikely to be beneficial and could produce unexpected incompatible +results. ``encodings.idna`` Compatibility ++++++++++++++++++++++++++++++++ Function calls from the Python built-in ``encodings.idna`` module are mapped to their IDNA 2008 equivalents using the ``idna.compat`` module. -Simply substitute the ``import`` clause in your code to refer to the -new module name. +Simply substitute the ``import`` clause in your code to refer to the new +module name. 
Exceptions ---------- -All errors raised during the conversion following the specification should -raise an exception derived from the ``idna.IDNAError`` base class. +All errors raised during the conversion following the specification +should raise an exception derived from the ``idna.IDNAError`` base +class. More specific exceptions that may be generated as ``idna.IDNABidiError`` when the error reflects an illegal combination of left-to-right and @@ -180,29 +181,31 @@ tables for performance. These tables are derived from computing against eligibility criteria in the respective standards. These tables are computed using the command-line script ``tools/idna-data``. -This tool will fetch relevant codepoint data from the Unicode repository -and perform the required calculations to identify eligibility. There are +This tool will fetch relevant codepoint data from the Unicode repository +and perform the required calculations to identify eligibility. There are three main modes: -* ``idna-data make-libdata``. Generates ``idnadata.py`` and ``uts46data.py``, - the pre-calculated lookup tables using for IDNA and UTS 46 conversions. Implementors - who wish to track this library against a different Unicode version may use this tool - to manually generate a different version of the ``idnadata.py`` and ``uts46data.py`` - files. +* ``idna-data make-libdata``. Generates ``idnadata.py`` and + ``uts46data.py``, the pre-calculated lookup tables using for IDNA and + UTS 46 conversions. Implementors who wish to track this library against + a different Unicode version may use this tool to manually generate a + different version of the ``idnadata.py`` and ``uts46data.py`` files. * ``idna-data make-table``. Generate a table of the IDNA disposition - (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix B.1 of RFC - 5892 and the pre-computed tables published by `IANA `_. + (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix + B.1 of RFC 5892 and the pre-computed tables published by `IANA + `_. -* ``idna-data U+0061``. Prints debugging output on the various properties - associated with an individual Unicode codepoint (in this case, U+0061), that are - used to assess the IDNA and UTS 46 status of a codepoint. This is helpful in debugging - or analysis. +* ``idna-data U+0061``. Prints debugging output on the various + properties associated with an individual Unicode codepoint (in this + case, U+0061), that are used to assess the IDNA and UTS 46 status of a + codepoint. This is helpful in debugging or analysis. -The tool accepts a number of arguments, described using ``idna-data -h``. Most notably, -the ``--version`` argument allows the specification of the version of Unicode to use -in computing the table data. For example, ``idna-data --version 9.0.0 make-libdata`` -will generate library data against Unicode 9.0.0. +The tool accepts a number of arguments, described using ``idna-data +-h``. Most notably, the ``--version`` argument allows the specification +of the version of Unicode to use in computing the table data. For +example, ``idna-data --version 9.0.0 make-libdata`` will generate +library data against Unicode 9.0.0. Additional Notes @@ -211,26 +214,29 @@ Additional Notes * **Packages**. The latest tagged release version is published in the `Python Package Index `_. -* **Version support**. This library supports Python 3.5 and higher. 
As this library - serves as a low-level toolkit for a variety of applications, many of which strive - for broad compatibility with older Python versions, there is no rush to remove - older intepreter support. Removing support for older versions should be well - justified in that the maintenance burden has become too high. - -* **Python 2**. Python 2 is supported by version 2.x of this library. While active - development of the version 2.x series has ended, notable issues being corrected - may be backported to 2.x. Use "idna<3" in your requirements file if you need this - library for a Python 2 application. - -* **Testing**. The library has a test suite based on each rule of the IDNA specification, as - well as tests that are provided as part of the Unicode Technical Standard 46, - `Unicode IDNA Compatibility Processing `_. - -* **Emoji**. It is an occasional request to support emoji domains in this library. Encoding - of symbols like emoji is expressly prohibited by the technical standard IDNA 2008 and - emoji domains are broadly phased out across the domain industry due to associated security - risks. For now, applications that wish need to support these non-compliant labels may - wish to consider trying the encode/decode operation in this library first, and then falling - back to using `encodings.idna`. See `the Github project `_ - for more discussion. +* **Version support**. This library supports Python 3.5 and higher. + As this library serves as a low-level toolkit for a variety of + applications, many of which strive for broad compatibility with older + Python versions, there is no rush to remove older intepreter support. + Removing support for older versions should be well justified in that the + maintenance burden has become too high. + +* **Python 2**. Python 2 is supported by version 2.x of this library. + While active development of the version 2.x series has ended, notable + issues being corrected may be backported to 2.x. Use "idna<3" in your + requirements file if you need this library for a Python 2 application. + +* **Testing**. The library has a test suite based on each rule of the + IDNA specification, as well as tests that are provided as part of the + Unicode Technical Standard 46, `Unicode IDNA Compatibility Processing + `_. + +* **Emoji**. It is an occasional request to support emoji domains in + this library. Encoding of symbols like emoji is expressly prohibited by + the technical standard IDNA 2008 and emoji domains are broadly phased + out across the domain industry due to associated security risks. For + now, applications that wish need to support these non-compliant labels + may wish to consider trying the encode/decode operation in this library + first, and then falling back to using `encodings.idna`. See `the Github + project `_ for more discussion. 
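
The reflowed README text in the METADATA hunk above documents the `encode`/`decode` API, UTS 46 compatibility mapping, and transitional processing. As a reviewer aid, here is a minimal runnable sketch of those documented calls; it assumes the vendored `idna` package (e.g. the copy under `aob_py3`) is importable, and it simply replays the README's own examples.

```python
import idna

# UTS 46 compatibility mapping lower-cases *LATIN CAPITAL LETTER K*
# before conversion, as the README text above explains.
print(idna.encode('Königsgäßchen', uts46=True))   # b'xn--knigsgchen-b4a3dun'
print(idna.decode('xn--knigsgchen-b4a3dun'))      # königsgäßchen

# Transitional processing maps sharp s (ß) to "ss" for IDNA 2003
# compatibility; the current specification no longer performs this mapping.
print(idna.encode('Königsgäßchen', uts46=True, transitional=True))
# b'xn--knigsgsschen-lcb0w'
```

Note that `idna.encode` returns bytes (A-labels) while `idna.decode` returns a str (U-labels), which matches the README's description of the conversion direction.
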
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/RECORD new file mode 100644 index 0000000..ba63a2f --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/RECORD @@ -0,0 +1,14 @@ +idna-3.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +idna-3.4.dist-info/LICENSE.md,sha256=otbk2UC9JNvnuWRc3hmpeSzFHbeuDVrNMBrIYMqj6DY,1523 +idna-3.4.dist-info/METADATA,sha256=8aLSf9MFS7oB26pZh2hprg7eJp0UJSc-3rpf_evp4DA,9830 +idna-3.4.dist-info/RECORD,, +idna-3.4.dist-info/WHEEL,sha256=4TfKIB_xu-04bc2iKz6_zFt-gEFEEDU_31HGhqzOCE8,81 +idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849 +idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374 +idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321 +idna/core.py,sha256=1JxchwKzkxBSn7R_oCE12oBu3eVux0VzdxolmIad24M,12950 +idna/idnadata.py,sha256=xUjqKqiJV8Ho_XzBpAtv5JFoVPSupK-SUXvtjygUHqw,44375 +idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881 +idna/package_data.py,sha256=C_jHJzmX8PI4xq0jpzmcTMxpb5lDsq4o5VyxQzlVrZE,21 +idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +idna/uts46data.py,sha256=zvjZU24s58_uAS850Mcd0NnD0X7_gCMAMjzWNIeUJdc,206539 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/WHEEL similarity index 72% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/WHEEL index 884ceb5..668ba4d 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.4.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 -Generator: flit 3.5.1 +Generator: flit 3.7.1 Root-Is-Purelib: true Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/codec.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/codec.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/compat.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/core.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/core.py old mode 100755 new mode 100644 index 55ab967..4f30037 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/core.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/core.py @@ -339,7 +339,10 @@ def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes: if isinstance(s, (bytes, bytearray)): - s = s.decode('ascii') + try: + s = s.decode('ascii') + except 
UnicodeDecodeError: + raise IDNAError('should pass a unicode string to the function rather than a byte string.') if uts46: s = uts46_remap(s, std3_rules, transitional) trailing_dot = False diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/idnadata.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/idnadata.py old mode 100755 new mode 100644 index 1b5805d..67db462 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/idnadata.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/idnadata.py @@ -1,6 +1,6 @@ # This file is automatically generated by tools/idna-data -__version__ = '14.0.0' +__version__ = '15.0.0' scripts = { 'Greek': ( 0x37000000374, @@ -55,12 +55,13 @@ 0x16fe200016fe4, 0x16ff000016ff2, 0x200000002a6e0, - 0x2a7000002b739, + 0x2a7000002b73a, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, 0x2f8000002fa1e, 0x300000003134b, + 0x31350000323b0, ), 'Hebrew': ( 0x591000005c8, @@ -77,6 +78,7 @@ 0x304100003097, 0x309d000030a0, 0x1b0010001b120, + 0x1b1320001b133, 0x1b1500001b153, 0x1f2000001f201, ), @@ -93,6 +95,7 @@ 0x1affd0001afff, 0x1b0000001b001, 0x1b1200001b123, + 0x1b1550001b156, 0x1b1640001b168, ), } @@ -1331,7 +1334,7 @@ 0xcdd00000cdf, 0xce000000ce4, 0xce600000cf0, - 0xcf100000cf3, + 0xcf100000cf4, 0xd0000000d0d, 0xd0e00000d11, 0xd1200000d45, @@ -1366,7 +1369,7 @@ 0xeb400000ebe, 0xec000000ec5, 0xec600000ec7, - 0xec800000ece, + 0xec800000ecf, 0xed000000eda, 0xede00000ee0, 0xf0000000f01, @@ -1859,7 +1862,7 @@ 0xab200000ab27, 0xab280000ab2f, 0xab300000ab5b, - 0xab600000ab6a, + 0xab600000ab69, 0xabc00000abeb, 0xabec0000abee, 0xabf00000abfa, @@ -1943,7 +1946,7 @@ 0x10e8000010eaa, 0x10eab00010ead, 0x10eb000010eb2, - 0x10f0000010f1d, + 0x10efd00010f1d, 0x10f2700010f28, 0x10f3000010f51, 0x10f7000010f86, @@ -1966,7 +1969,7 @@ 0x111dc000111dd, 0x1120000011212, 0x1121300011238, - 0x1123e0001123f, + 0x1123e00011242, 0x1128000011287, 0x1128800011289, 0x1128a0001128e, @@ -2047,11 +2050,16 @@ 0x11d9300011d99, 0x11da000011daa, 0x11ee000011ef7, + 0x11f0000011f11, + 0x11f1200011f3b, + 0x11f3e00011f43, + 0x11f5000011f5a, 0x11fb000011fb1, 0x120000001239a, 0x1248000012544, 0x12f9000012ff1, - 0x130000001342f, + 0x1300000013430, + 0x1344000013456, 0x1440000014647, 0x1680000016a39, 0x16a4000016a5f, @@ -2079,7 +2087,9 @@ 0x1aff50001affc, 0x1affd0001afff, 0x1b0000001b123, + 0x1b1320001b133, 0x1b1500001b153, + 0x1b1550001b156, 0x1b1640001b168, 0x1b1700001b2fc, 0x1bc000001bc6b, @@ -2096,17 +2106,21 @@ 0x1da9b0001daa0, 0x1daa10001dab0, 0x1df000001df1f, + 0x1df250001df2b, 0x1e0000001e007, 0x1e0080001e019, 0x1e01b0001e022, 0x1e0230001e025, 0x1e0260001e02b, + 0x1e0300001e06e, + 0x1e08f0001e090, 0x1e1000001e12d, 0x1e1300001e13e, 0x1e1400001e14a, 0x1e14e0001e14f, 0x1e2900001e2af, 0x1e2c00001e2fa, + 0x1e4d00001e4fa, 0x1e7e00001e7e7, 0x1e7e80001e7ec, 0x1e7ed0001e7ef, @@ -2115,13 +2129,13 @@ 0x1e8d00001e8d7, 0x1e9220001e94c, 0x1e9500001e95a, - 0x1fbf00001fbfa, 0x200000002a6e0, - 0x2a7000002b739, + 0x2a7000002b73a, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, 0x300000003134b, + 0x31350000323b0, ), 'CONTEXTJ': ( 0x200c0000200e, diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/intranges.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/intranges.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/package_data.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/package_data.py old mode 100755 new mode 100644 index f5ea87c..8501893 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/package_data.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/package_data.py @@ -1,2 +1,2 @@ -__version__ = '3.3' +__version__ = '3.4' diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/uts46data.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/uts46data.py old mode 100755 new mode 100644 index 8f65705..186796c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/uts46data.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna/uts46data.py @@ -7,7 +7,7 @@ """IDNA Mapping Table from UTS46.""" -__version__ = '14.0.0' +__version__ = '15.0.0' def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x0, '3'), @@ -1300,7 +1300,7 @@ def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0xCE6, 'V'), (0xCF0, 'X'), (0xCF1, 'V'), - (0xCF3, 'X'), + (0xCF4, 'X'), (0xD00, 'V'), (0xD0D, 'X'), (0xD0E, 'V'), @@ -1368,7 +1368,7 @@ def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0xEC6, 'V'), (0xEC7, 'X'), (0xEC8, 'V'), - (0xECE, 'X'), + (0xECF, 'X'), (0xED0, 'V'), (0xEDA, 'X'), (0xEDC, 'M', 'ຫນ'), @@ -5917,7 +5917,7 @@ def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x10EAE, 'X'), (0x10EB0, 'V'), (0x10EB2, 'X'), - (0x10F00, 'V'), + (0x10EFD, 'V'), (0x10F28, 'X'), (0x10F30, 'V'), (0x10F5A, 'X'), @@ -5956,7 +5956,7 @@ def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x11200, 'V'), (0x11212, 'X'), (0x11213, 'V'), - (0x1123F, 'X'), + (0x11242, 'X'), (0x11280, 'V'), (0x11287, 'X'), (0x11288, 'V'), @@ -6097,6 +6097,8 @@ def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x11AA3, 'X'), (0x11AB0, 'V'), (0x11AF9, 'X'), + (0x11B00, 'V'), + (0x11B0A, 'X'), (0x11C00, 'V'), (0x11C09, 'X'), (0x11C0A, 'V'), @@ -6139,13 +6141,19 @@ def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x11DAA, 'X'), (0x11EE0, 'V'), (0x11EF9, 'X'), - (0x11FB0, 'V'), - (0x11FB1, 'X'), - (0x11FC0, 'V'), + (0x11F00, 'V'), ] def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ + (0x11F11, 'X'), + (0x11F12, 'V'), + (0x11F3B, 'X'), + (0x11F3E, 'V'), + (0x11F5A, 'X'), + (0x11FB0, 'V'), + (0x11FB1, 'X'), + (0x11FC0, 'V'), (0x11FF2, 'X'), (0x11FFF, 'V'), (0x1239A, 'X'), @@ -6158,7 +6166,9 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x12F90, 'V'), (0x12FF3, 'X'), (0x13000, 'V'), - (0x1342F, 'X'), + (0x13430, 'X'), + (0x13440, 'V'), + (0x13456, 'X'), (0x14400, 'V'), (0x14647, 'X'), (0x16800, 'V'), @@ -6236,6 +6246,10 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x18D00, 'V'), (0x18D09, 'X'), (0x1AFF0, 'V'), + ] + +def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1AFF4, 'X'), (0x1AFF5, 'V'), (0x1AFFC, 'X'), @@ -6243,13 +6257,13 @@ def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1AFFF, 'X'), (0x1B000, 'V'), (0x1B123, 'X'), + (0x1B132, 'V'), + (0x1B133, 'X'), (0x1B150, 'V'), (0x1B153, 'X'), + (0x1B155, 'V'), + (0x1B156, 'X'), (0x1B164, 'V'), - ] - -def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1B168, 'X'), (0x1B170, 'V'), (0x1B2FC, 'X'), @@ -6295,6 +6309,8 @@ def _seg_60() -> List[Union[Tuple[int, str], 
Tuple[int, str, str]]]: (0x1D1EB, 'X'), (0x1D200, 'V'), (0x1D246, 'X'), + (0x1D2C0, 'V'), + (0x1D2D4, 'X'), (0x1D2E0, 'V'), (0x1D2F4, 'X'), (0x1D300, 'V'), @@ -6334,6 +6350,10 @@ def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D41E, 'M', 'e'), (0x1D41F, 'M', 'f'), (0x1D420, 'M', 'g'), + ] + +def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D421, 'M', 'h'), (0x1D422, 'M', 'i'), (0x1D423, 'M', 'j'), @@ -6350,10 +6370,6 @@ def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D42E, 'M', 'u'), (0x1D42F, 'M', 'v'), (0x1D430, 'M', 'w'), - ] - -def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D431, 'M', 'x'), (0x1D432, 'M', 'y'), (0x1D433, 'M', 'z'), @@ -6438,6 +6454,10 @@ def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D482, 'M', 'a'), (0x1D483, 'M', 'b'), (0x1D484, 'M', 'c'), + ] + +def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D485, 'M', 'd'), (0x1D486, 'M', 'e'), (0x1D487, 'M', 'f'), @@ -6454,10 +6474,6 @@ def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D492, 'M', 'q'), (0x1D493, 'M', 'r'), (0x1D494, 'M', 's'), - ] - -def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D495, 'M', 't'), (0x1D496, 'M', 'u'), (0x1D497, 'M', 'v'), @@ -6542,6 +6558,10 @@ def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D4E9, 'M', 'z'), (0x1D4EA, 'M', 'a'), (0x1D4EB, 'M', 'b'), + ] + +def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D4EC, 'M', 'c'), (0x1D4ED, 'M', 'd'), (0x1D4EE, 'M', 'e'), @@ -6558,10 +6578,6 @@ def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D4F9, 'M', 'p'), (0x1D4FA, 'M', 'q'), (0x1D4FB, 'M', 'r'), - ] - -def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D4FC, 'M', 's'), (0x1D4FD, 'M', 't'), (0x1D4FE, 'M', 'u'), @@ -6646,6 +6662,10 @@ def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D550, 'M', 'y'), (0x1D551, 'X'), (0x1D552, 'M', 'a'), + ] + +def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D553, 'M', 'b'), (0x1D554, 'M', 'c'), (0x1D555, 'M', 'd'), @@ -6662,10 +6682,6 @@ def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D560, 'M', 'o'), (0x1D561, 'M', 'p'), (0x1D562, 'M', 'q'), - ] - -def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D563, 'M', 'r'), (0x1D564, 'M', 's'), (0x1D565, 'M', 't'), @@ -6750,6 +6766,10 @@ def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D5B4, 'M', 'u'), (0x1D5B5, 'M', 'v'), (0x1D5B6, 'M', 'w'), + ] + +def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D5B7, 'M', 'x'), (0x1D5B8, 'M', 'y'), (0x1D5B9, 'M', 'z'), @@ -6766,10 +6786,6 @@ def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D5C4, 'M', 'k'), (0x1D5C5, 'M', 'l'), (0x1D5C6, 'M', 'm'), - ] - -def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D5C7, 'M', 'n'), (0x1D5C8, 'M', 'o'), (0x1D5C9, 'M', 'p'), @@ -6854,6 +6870,10 @@ def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D618, 'M', 'q'), (0x1D619, 'M', 'r'), (0x1D61A, 'M', 's'), + ] + +def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D61B, 'M', 't'), (0x1D61C, 'M', 'u'), (0x1D61D, 'M', 'v'), @@ -6870,10 +6890,6 @@ def _seg_65() -> 
List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D628, 'M', 'g'), (0x1D629, 'M', 'h'), (0x1D62A, 'M', 'i'), - ] - -def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D62B, 'M', 'j'), (0x1D62C, 'M', 'k'), (0x1D62D, 'M', 'l'), @@ -6958,6 +6974,10 @@ def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D67C, 'M', 'm'), (0x1D67D, 'M', 'n'), (0x1D67E, 'M', 'o'), + ] + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D67F, 'M', 'p'), (0x1D680, 'M', 'q'), (0x1D681, 'M', 'r'), @@ -6974,10 +6994,6 @@ def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D68C, 'M', 'c'), (0x1D68D, 'M', 'd'), (0x1D68E, 'M', 'e'), - ] - -def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D68F, 'M', 'f'), (0x1D690, 'M', 'g'), (0x1D691, 'M', 'h'), @@ -7062,6 +7078,10 @@ def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D6E2, 'M', 'α'), (0x1D6E3, 'M', 'β'), (0x1D6E4, 'M', 'γ'), + ] + +def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D6E5, 'M', 'δ'), (0x1D6E6, 'M', 'ε'), (0x1D6E7, 'M', 'ζ'), @@ -7078,10 +7098,6 @@ def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D6F2, 'M', 'ρ'), (0x1D6F3, 'M', 'θ'), (0x1D6F4, 'M', 'σ'), - ] - -def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D6F5, 'M', 'τ'), (0x1D6F6, 'M', 'υ'), (0x1D6F7, 'M', 'φ'), @@ -7166,6 +7182,10 @@ def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D747, 'M', 'σ'), (0x1D749, 'M', 'τ'), (0x1D74A, 'M', 'υ'), + ] + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D74B, 'M', 'φ'), (0x1D74C, 'M', 'χ'), (0x1D74D, 'M', 'ψ'), @@ -7182,10 +7202,6 @@ def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D758, 'M', 'γ'), (0x1D759, 'M', 'δ'), (0x1D75A, 'M', 'ε'), - ] - -def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D75B, 'M', 'ζ'), (0x1D75C, 'M', 'η'), (0x1D75D, 'M', 'θ'), @@ -7270,6 +7286,10 @@ def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D7AD, 'M', 'δ'), (0x1D7AE, 'M', 'ε'), (0x1D7AF, 'M', 'ζ'), + ] + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D7B0, 'M', 'η'), (0x1D7B1, 'M', 'θ'), (0x1D7B2, 'M', 'ι'), @@ -7286,10 +7306,6 @@ def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1D7BE, 'M', 'υ'), (0x1D7BF, 'M', 'φ'), (0x1D7C0, 'M', 'χ'), - ] - -def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1D7C1, 'M', 'ψ'), (0x1D7C2, 'M', 'ω'), (0x1D7C3, 'M', '∂'), @@ -7359,6 +7375,8 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1DAB0, 'X'), (0x1DF00, 'V'), (0x1DF1F, 'X'), + (0x1DF25, 'V'), + (0x1DF2B, 'X'), (0x1E000, 'V'), (0x1E007, 'X'), (0x1E008, 'V'), @@ -7369,6 +7387,75 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1E025, 'X'), (0x1E026, 'V'), (0x1E02B, 'X'), + (0x1E030, 'M', 'а'), + (0x1E031, 'M', 'б'), + (0x1E032, 'M', 'в'), + ] + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E033, 'M', 'г'), + (0x1E034, 'M', 'д'), + (0x1E035, 'M', 'е'), + (0x1E036, 'M', 'ж'), + (0x1E037, 'M', 'з'), + (0x1E038, 'M', 'и'), + (0x1E039, 'M', 'к'), + (0x1E03A, 'M', 'л'), + (0x1E03B, 'M', 'м'), + (0x1E03C, 'M', 'о'), + (0x1E03D, 'M', 'п'), + (0x1E03E, 'M', 'р'), + (0x1E03F, 'M', 'с'), + (0x1E040, 'M', 'т'), + 
(0x1E041, 'M', 'у'), + (0x1E042, 'M', 'ф'), + (0x1E043, 'M', 'х'), + (0x1E044, 'M', 'ц'), + (0x1E045, 'M', 'ч'), + (0x1E046, 'M', 'ш'), + (0x1E047, 'M', 'ы'), + (0x1E048, 'M', 'э'), + (0x1E049, 'M', 'ю'), + (0x1E04A, 'M', 'ꚉ'), + (0x1E04B, 'M', 'ә'), + (0x1E04C, 'M', 'і'), + (0x1E04D, 'M', 'ј'), + (0x1E04E, 'M', 'ө'), + (0x1E04F, 'M', 'ү'), + (0x1E050, 'M', 'ӏ'), + (0x1E051, 'M', 'а'), + (0x1E052, 'M', 'б'), + (0x1E053, 'M', 'в'), + (0x1E054, 'M', 'г'), + (0x1E055, 'M', 'д'), + (0x1E056, 'M', 'е'), + (0x1E057, 'M', 'ж'), + (0x1E058, 'M', 'з'), + (0x1E059, 'M', 'и'), + (0x1E05A, 'M', 'к'), + (0x1E05B, 'M', 'л'), + (0x1E05C, 'M', 'о'), + (0x1E05D, 'M', 'п'), + (0x1E05E, 'M', 'с'), + (0x1E05F, 'M', 'у'), + (0x1E060, 'M', 'ф'), + (0x1E061, 'M', 'х'), + (0x1E062, 'M', 'ц'), + (0x1E063, 'M', 'ч'), + (0x1E064, 'M', 'ш'), + (0x1E065, 'M', 'ъ'), + (0x1E066, 'M', 'ы'), + (0x1E067, 'M', 'ґ'), + (0x1E068, 'M', 'і'), + (0x1E069, 'M', 'ѕ'), + (0x1E06A, 'M', 'џ'), + (0x1E06B, 'M', 'ҫ'), + (0x1E06C, 'M', 'ꙑ'), + (0x1E06D, 'M', 'ұ'), + (0x1E06E, 'X'), + (0x1E08F, 'V'), + (0x1E090, 'X'), (0x1E100, 'V'), (0x1E12D, 'X'), (0x1E130, 'V'), @@ -7383,6 +7470,8 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1E2FA, 'X'), (0x1E2FF, 'V'), (0x1E300, 'X'), + (0x1E4D0, 'V'), + (0x1E4FA, 'X'), (0x1E7E0, 'V'), (0x1E7E7, 'X'), (0x1E7E8, 'V'), @@ -7390,10 +7479,6 @@ def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1E7ED, 'V'), (0x1E7EF, 'X'), (0x1E7F0, 'V'), - ] - -def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1E7FF, 'X'), (0x1E800, 'V'), (0x1E8C5, 'X'), @@ -7409,6 +7494,10 @@ def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1E907, 'M', '𞤩'), (0x1E908, 'M', '𞤪'), (0x1E909, 'M', '𞤫'), + ] + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1E90A, 'M', '𞤬'), (0x1E90B, 'M', '𞤭'), (0x1E90C, 'M', '𞤮'), @@ -7494,10 +7583,6 @@ def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1EE31, 'M', 'ص'), (0x1EE32, 'M', 'ق'), (0x1EE33, 'X'), - ] - -def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1EE34, 'M', 'ش'), (0x1EE35, 'M', 'ت'), (0x1EE36, 'M', 'ث'), @@ -7513,6 +7598,10 @@ def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1EE48, 'X'), (0x1EE49, 'M', 'ي'), (0x1EE4A, 'X'), + ] + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EE4B, 'M', 'ل'), (0x1EE4C, 'X'), (0x1EE4D, 'M', 'ن'), @@ -7598,10 +7687,6 @@ def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1EEA3, 'M', 'د'), (0x1EEA4, 'X'), (0x1EEA5, 'M', 'و'), - ] - -def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1EEA6, 'M', 'ز'), (0x1EEA7, 'M', 'ح'), (0x1EEA8, 'M', 'ط'), @@ -7617,6 +7702,10 @@ def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1EEB2, 'M', 'ق'), (0x1EEB3, 'M', 'ر'), (0x1EEB4, 'M', 'ش'), + ] + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EEB5, 'M', 'ت'), (0x1EEB6, 'M', 'ث'), (0x1EEB7, 'M', 'خ'), @@ -7702,10 +7791,6 @@ def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1F141, 'M', 'r'), (0x1F142, 'M', 's'), (0x1F143, 'M', 't'), - ] - -def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1F144, 'M', 'u'), (0x1F145, 'M', 'v'), (0x1F146, 'M', 'w'), @@ -7721,6 +7806,10 @@ def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1F150, 'V'), 
(0x1F16A, 'M', 'mc'), (0x1F16B, 'M', 'md'), + ] + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1F16C, 'M', 'mr'), (0x1F16D, 'V'), (0x1F190, 'M', 'dj'), @@ -7793,23 +7882,19 @@ def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1F266, 'X'), (0x1F300, 'V'), (0x1F6D8, 'X'), - (0x1F6DD, 'V'), + (0x1F6DC, 'V'), (0x1F6ED, 'X'), (0x1F6F0, 'V'), (0x1F6FD, 'X'), (0x1F700, 'V'), - (0x1F774, 'X'), - (0x1F780, 'V'), - (0x1F7D9, 'X'), + (0x1F777, 'X'), + (0x1F77B, 'V'), + (0x1F7DA, 'X'), (0x1F7E0, 'V'), (0x1F7EC, 'X'), (0x1F7F0, 'V'), (0x1F7F1, 'X'), (0x1F800, 'V'), - ] - -def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x1F80C, 'X'), (0x1F810, 'V'), (0x1F848, 'X'), @@ -7825,24 +7910,24 @@ def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x1FA54, 'X'), (0x1FA60, 'V'), (0x1FA6E, 'X'), + ] + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1FA70, 'V'), - (0x1FA75, 'X'), - (0x1FA78, 'V'), (0x1FA7D, 'X'), (0x1FA80, 'V'), - (0x1FA87, 'X'), + (0x1FA89, 'X'), (0x1FA90, 'V'), - (0x1FAAD, 'X'), - (0x1FAB0, 'V'), - (0x1FABB, 'X'), - (0x1FAC0, 'V'), + (0x1FABE, 'X'), + (0x1FABF, 'V'), (0x1FAC6, 'X'), - (0x1FAD0, 'V'), - (0x1FADA, 'X'), + (0x1FACE, 'V'), + (0x1FADC, 'X'), (0x1FAE0, 'V'), - (0x1FAE8, 'X'), + (0x1FAE9, 'X'), (0x1FAF0, 'V'), - (0x1FAF7, 'X'), + (0x1FAF9, 'X'), (0x1FB00, 'V'), (0x1FB93, 'X'), (0x1FB94, 'V'), @@ -7861,7 +7946,7 @@ def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x20000, 'V'), (0x2A6E0, 'X'), (0x2A700, 'V'), - (0x2B739, 'X'), + (0x2B73A, 'X'), (0x2B740, 'V'), (0x2B81E, 'X'), (0x2B820, 'V'), @@ -7910,10 +7995,6 @@ def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F827, 'M', '勤'), (0x2F828, 'M', '勺'), (0x2F829, 'M', '包'), - ] - -def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x2F82A, 'M', '匆'), (0x2F82B, 'M', '北'), (0x2F82C, 'M', '卉'), @@ -7933,6 +8014,10 @@ def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F83C, 'M', '咞'), (0x2F83D, 'M', '吸'), (0x2F83E, 'M', '呈'), + ] + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F83F, 'M', '周'), (0x2F840, 'M', '咢'), (0x2F841, 'M', '哶'), @@ -8014,10 +8099,6 @@ def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F88F, 'M', '𪎒'), (0x2F890, 'M', '廾'), (0x2F891, 'M', '𢌱'), - ] - -def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x2F893, 'M', '舁'), (0x2F894, 'M', '弢'), (0x2F896, 'M', '㣇'), @@ -8037,6 +8118,10 @@ def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F8A4, 'M', '𢛔'), (0x2F8A5, 'M', '惇'), (0x2F8A6, 'M', '慈'), + ] + +def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F8A7, 'M', '慌'), (0x2F8A8, 'M', '慎'), (0x2F8A9, 'M', '慌'), @@ -8118,10 +8203,6 @@ def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F8F5, 'M', '殺'), (0x2F8F6, 'M', '殻'), (0x2F8F7, 'M', '𣪍'), - ] - -def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x2F8F8, 'M', '𡴋'), (0x2F8F9, 'M', '𣫺'), (0x2F8FA, 'M', '汎'), @@ -8141,6 +8222,10 @@ def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F908, 'M', '港'), (0x2F909, 'M', '湮'), (0x2F90A, 'M', '㴳'), + ] + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F90B, 'M', '滋'), (0x2F90C, 'M', '滇'), (0x2F90D, 'M', '𣻑'), @@ -8222,10 +8307,6 @@ def 
_seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F95B, 'M', '穏'), (0x2F95C, 'M', '𥥼'), (0x2F95D, 'M', '𥪧'), - ] - -def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x2F95F, 'X'), (0x2F960, 'M', '䈂'), (0x2F961, 'M', '𥮫'), @@ -8245,6 +8326,10 @@ def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F96F, 'M', '縂'), (0x2F970, 'M', '繅'), (0x2F971, 'M', '䌴'), + ] + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F972, 'M', '𦈨'), (0x2F973, 'M', '𦉇'), (0x2F974, 'M', '䍙'), @@ -8326,10 +8411,6 @@ def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F9C0, 'M', '蟡'), (0x2F9C1, 'M', '蠁'), (0x2F9C2, 'M', '䗹'), - ] - -def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: - return [ (0x2F9C3, 'M', '衠'), (0x2F9C4, 'M', '衣'), (0x2F9C5, 'M', '𧙧'), @@ -8349,6 +8430,10 @@ def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2F9D3, 'M', '𧲨'), (0x2F9D4, 'M', '貫'), (0x2F9D5, 'M', '賁'), + ] + +def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F9D6, 'M', '贛'), (0x2F9D7, 'M', '起'), (0x2F9D8, 'M', '𧼯'), @@ -8423,6 +8508,8 @@ def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: (0x2FA1E, 'X'), (0x30000, 'V'), (0x3134B, 'X'), + (0x31350, 'V'), + (0x323B0, 'X'), (0xE0100, 'I'), (0xE01F0, 'X'), ] @@ -8509,4 +8596,5 @@ def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + _seg_78() + _seg_79() + _seg_80() + + _seg_81() ) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/LICENSE deleted file mode 100644 index be7e092..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2017-2019 Jason R. Coombs, Barry Warsaw - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
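The ``uts46data`` table patched above is a flat, sorted tuple of ``(codepoint, status)`` or ``(codepoint, status, mapping)`` rows, each governing the span up to the next row's codepoint; ``idna.core.uts46_remap`` finds the governing row with ``bisect``. A sketch of that lookup over a toy table built from rows visible in the hunks above (the real table has thousands of rows)::

    import bisect
    from typing import Tuple, Union

    Row = Union[Tuple[int, str], Tuple[int, str, str]]

    # Toy stand-in for idna.uts46data.uts46data; rows copied from the diff.
    TABLE: Tuple[Row, ...] = (
        (0xCF1, 'V'),         # valid from U+0CF1 ...
        (0xCF4, 'X'),         # ... disallowed from U+0CF4 onward
        (0x1D41E, 'M', 'e'),  # mathematical bold small 'e' maps to 'e'
        (0x1D41F, 'M', 'f'),
    )

    def lookup(code_point: int) -> Row:
        """Return the row that governs code_point."""
        # (code_point, 'Z') sorts after any real row for that codepoint,
        # since every status letter ('V', 'X', 'M', 'D', 'I', '3')
        # precedes 'Z'; bisect_left then lands just past the governing row.
        return TABLE[bisect.bisect_left(TABLE, (code_point, 'Z')) - 1]

    assert lookup(0xCF2)[1] == 'V'            # covered by the 0xCF1 row
    assert lookup(0x1D41E)[1:] == ('M', 'e')  # mapped codepoint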
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/RECORD deleted file mode 100644 index 565ffbb..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/RECORD +++ /dev/null @@ -1,15 +0,0 @@ -importlib_metadata-4.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -importlib_metadata-4.10.1.dist-info/LICENSE,sha256=wNe6dAchmJ1VvVB8D9oTc-gHHadCuaSBAev36sYEM6U,571 -importlib_metadata-4.10.1.dist-info/METADATA,sha256=-HDYj3iK6bcjwN5MAoO58Op6WQIYQfbhl6ZaPqL0IZI,3989 -importlib_metadata-4.10.1.dist-info/RECORD,, -importlib_metadata-4.10.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 -importlib_metadata-4.10.1.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19 -importlib_metadata/__init__.py,sha256=7WxDdbPPu4Wy3VeMTApd-JlPQoENgVDyDH6aqyE7acE,30175 -importlib_metadata/_adapters.py,sha256=B6fCi5-8mLVDFUZj3krI5nAo-mKp1dH_qIavyIyFrJs,1862 -importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743 -importlib_metadata/_compat.py,sha256=EU2XCFBPFByuI0Of6XkAuBYbzqSyjwwwwqmsK4ccna0,1826 -importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895 -importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068 -importlib_metadata/_meta.py,sha256=_F48Hu_jFxkfKWz5wcYS8vO23qEygbVdF9r-6qh-hjE,1154 -importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166 -importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/METADATA similarity index 66% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/METADATA index 7327b88..ee0ffd9 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/METADATA @@ -1,12 +1,10 @@ Metadata-Version: 2.1 Name: importlib-metadata -Version: 4.10.1 +Version: 6.6.0 Summary: Read metadata from Python packages Home-page: https://github.com/python/importlib_metadata Author: Jason R. Coombs Author-email: jaraco@jaraco.com -License: UNKNOWN -Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License @@ -17,32 +15,33 @@ License-File: LICENSE Requires-Dist: zipp (>=0.5) Requires-Dist: typing-extensions (>=3.6.4) ; python_version < "3.8" Provides-Extra: docs -Requires-Dist: sphinx ; extra == 'docs' -Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs' +Requires-Dist: sphinx (>=3.5) ; extra == 'docs' +Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs' Requires-Dist: rst.linker (>=1.9) ; extra == 'docs' +Requires-Dist: furo ; extra == 'docs' +Requires-Dist: sphinx-lint ; extra == 'docs' +Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs' Provides-Extra: perf Requires-Dist: ipython ; extra == 'perf' Provides-Extra: testing Requires-Dist: pytest (>=6) ; extra == 'testing' Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing' -Requires-Dist: pytest-flake8 ; extra == 'testing' +Requires-Dist: flake8 (<5) ; extra == 'testing' Requires-Dist: pytest-cov ; extra == 'testing' -Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing' +Requires-Dist: pytest-enabler (>=1.3) ; extra == 'testing' Requires-Dist: packaging ; extra == 'testing' Requires-Dist: pyfakefs ; extra == 'testing' Requires-Dist: flufl.flake8 ; extra == 'testing' Requires-Dist: pytest-perf (>=0.9.2) ; extra == 'testing' Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing' -Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: pytest-flake8 ; (python_version < "3.12") and extra == 'testing' Requires-Dist: importlib-resources (>=1.3) ; (python_version < "3.9") and extra == 'testing' .. image:: https://img.shields.io/pypi/v/importlib_metadata.svg - :target: `PyPI link`_ + :target: https://pypi.org/project/importlib_metadata .. image:: https://img.shields.io/pypi/pyversions/importlib_metadata.svg - :target: `PyPI link`_ - -.. _PyPI link: https://pypi.org/project/importlib_metadata .. image:: https://github.com/python/importlib_metadata/workflows/tests/badge.svg :target: https://github.com/python/importlib_metadata/actions?query=workflow%3A%22tests%22 @@ -55,9 +54,11 @@ Requires-Dist: importlib-resources (>=1.3) ; (python_version < "3.9") and extra .. 
image:: https://readthedocs.org/projects/importlib-metadata/badge/?version=latest :target: https://importlib-metadata.readthedocs.io/en/latest/?badge=latest -.. image:: https://img.shields.io/badge/skeleton-2021-informational +.. image:: https://img.shields.io/badge/skeleton-2023-informational :target: https://blog.jaraco.com/skeleton +.. image:: https://tidelift.com/badges/package/pypi/importlib-metadata + :target: https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=readme Library to access the metadata for a Python package. @@ -78,9 +79,11 @@ were contributed to different versions in the standard library: * - importlib_metadata - stdlib - * - 4.8 + * - 6.5 + - 3.12 + * - 4.13 - 3.11 - * - 4.4 + * - 4.6 - 3.10 * - 1.4 - 3.8 @@ -89,7 +92,7 @@ were contributed to different versions in the standard library: Usage ===== -See the `online documentation `_ +See the `online documentation `_ for usage details. `Finder authors @@ -113,6 +116,20 @@ Project details * Project home: https://github.com/python/importlib_metadata * Report bugs at: https://github.com/python/importlib_metadata/issues * Code hosting: https://github.com/python/importlib_metadata - * Documentation: https://importlib_metadata.readthedocs.io/ + * Documentation: https://importlib-metadata.readthedocs.io/ + +For Enterprise +============== + +Available as part of the Tidelift Subscription. + +This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. + +`Learn more `_. +Security Contact +================ +To report a security vulnerability, please use the +`Tidelift security contact `_. +Tidelift will coordinate the fix and disclosure. 
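The METADATA above describes the library's purpose and compatibility; a minimal usage sketch follows, where the queried distribution name ``"pip"`` is purely illustrative and must be installed for the calls to succeed. Note the selectable entry-point API: the ``SelectableGroups``/``DeprecatedList`` shims are deleted in the ``__init__.py`` diff below, so entry points are filtered with keyword arguments rather than indexed like a dict::

    from importlib_metadata import (
        PackageNotFoundError,
        distribution,
        entry_points,
        version,
    )

    try:
        print(version("pip"))            # version string from metadata
        dist = distribution("pip")
        print(dist.metadata["Summary"])  # email-style METADATA fields
    except PackageNotFoundError:
        print("pip is not installed")

    # EntryPoints.select()-style filtering by group (or name, module, ...).
    for ep in entry_points(group="console_scripts"):
        print(ep.name, "->", ep.value)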
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/RECORD new file mode 100644 index 0000000..6a2bc26 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/RECORD @@ -0,0 +1,16 @@ +importlib_metadata-6.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +importlib_metadata-6.6.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +importlib_metadata-6.6.0.dist-info/METADATA,sha256=PkZEyOdZ2YQVAGc1nwMGJ2o6cs7hWeBL2Ya5NoyUGbA,4958 +importlib_metadata-6.6.0.dist-info/RECORD,, +importlib_metadata-6.6.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +importlib_metadata-6.6.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19 +importlib_metadata/__init__.py,sha256=e_TtkQm2IsKEBa4H9j502DqMUZ-k6KQa53o9VX47ofs,29949 +importlib_metadata/_adapters.py,sha256=i8S6Ib1OQjcILA-l4gkzktMZe18TaeUNI49PLRp6OBU,2454 +importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743 +importlib_metadata/_compat.py,sha256=VZUVAQntwXR1lZUYgZjOnXsV_B5mRV5FjNjwVP_EMSo,2096 +importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895 +importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068 +importlib_metadata/_meta.py,sha256=I2AuaUMr5a6cTdZleV9WpyqUCSooqqV-zSzr1qn7FMw,1615 +importlib_metadata/_py39compat.py,sha256=2Tk5twb_VgLCY-1NEAQjdZp_S9OFMC-pUzP2isuaPsQ,1098 +importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166 +importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/WHEEL similarity index 65% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/WHEEL index becc9a6..1f37c02 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) +Generator: bdist_wheel (0.40.0) Root-Is-Purelib: true Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-6.6.0.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/__init__.py old mode 100755 new mode 100644 index 7713e1e..281cfb0 --- 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/__init__.py @@ -5,6 +5,7 @@ import sys import zipp import email +import inspect import pathlib import operator import textwrap @@ -12,12 +13,14 @@ import functools import itertools import posixpath +import contextlib import collections -from . import _adapters, _meta +from . import _adapters, _meta, _py39compat from ._collections import FreezableDefaultDict, Pair from ._compat import ( NullFinder, + StrPath, install, pypy_partial, ) @@ -29,8 +32,7 @@ from importlib import import_module from importlib.abc import MetaPathFinder from itertools import starmap -from typing import List, Mapping, Optional, Union - +from typing import Iterable, List, Mapping, Optional, Set, cast __all__ = [ 'Distribution', @@ -51,11 +53,11 @@ class PackageNotFoundError(ModuleNotFoundError): """The package was not found.""" - def __str__(self): + def __str__(self) -> str: return f"No package metadata was found for {self.name}" @property - def name(self): + def name(self) -> str: # type: ignore[override] (name,) = self.args return name @@ -121,7 +123,7 @@ def read(text, filter_=None): yield Pair(name, value) @staticmethod - def valid(line): + def valid(line: str): return line and not line.startswith('#') @@ -139,6 +141,7 @@ class DeprecatedTuple: 1 """ + # Do not remove prior to 2023-05-01 or Python 3.13 _warn = functools.partial( warnings.warn, "EntryPoint tuple interface is deprecated. Access members by name.", @@ -157,6 +160,15 @@ class EntryPoint(DeprecatedTuple): See `the packaging docs on entry points `_ for more information. + + >>> ep = EntryPoint( + ... name=None, group=None, value='package.module:attr [extra1, extra2]') + >>> ep.module + 'package.module' + >>> ep.attr + 'attr' + >>> ep.extras + ['extra1', 'extra2'] """ pattern = re.compile( @@ -180,9 +192,13 @@ class EntryPoint(DeprecatedTuple): following the attr, and following any extras. """ + name: str + value: str + group: str + dist: Optional['Distribution'] = None - def __init__(self, name, value, group): + def __init__(self, name: str, value: str, group: str) -> None: vars(self).update(name=name, value=value, group=group) def load(self): @@ -196,36 +212,47 @@ def load(self): return functools.reduce(getattr, attrs, module) @property - def module(self): + def module(self) -> str: match = self.pattern.match(self.value) + assert match is not None return match.group('module') @property - def attr(self): + def attr(self) -> str: match = self.pattern.match(self.value) + assert match is not None return match.group('attr') @property - def extras(self): + def extras(self) -> List[str]: match = self.pattern.match(self.value) - return list(re.finditer(r'\w+', match.group('extras') or '')) + assert match is not None + return re.findall(r'\w+', match.group('extras') or '') def _for(self, dist): vars(self).update(dist=dist) return self - def __iter__(self): + def matches(self, **params): """ - Supply iter so one may construct dicts of EntryPoints by name. + EntryPoint matches the given parameters. 
+ + >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]') + >>> ep.matches(group='foo') + True + >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]') + True + >>> ep.matches(group='foo', name='other') + False + >>> ep.matches() + True + >>> ep.matches(extras=['extra1', 'extra2']) + True + >>> ep.matches(module='bing') + True + >>> ep.matches(attr='bong') + True """ - msg = ( - "Construction of dict of EntryPoints is deprecated in " - "favor of EntryPoints." - ) - warnings.warn(msg, DeprecationWarning) - return iter((self.name, self)) - - def matches(self, **params): attrs = (getattr(self, param) for param in params) return all(map(operator.eq, params.values(), attrs)) @@ -247,103 +274,21 @@ def __repr__(self): f'group={self.group!r})' ) - def __hash__(self): + def __hash__(self) -> int: return hash(self._key()) -class DeprecatedList(list): - """ - Allow an otherwise immutable object to implement mutability - for compatibility. - - >>> recwarn = getfixture('recwarn') - >>> dl = DeprecatedList(range(3)) - >>> dl[0] = 1 - >>> dl.append(3) - >>> del dl[3] - >>> dl.reverse() - >>> dl.sort() - >>> dl.extend([4]) - >>> dl.pop(-1) - 4 - >>> dl.remove(1) - >>> dl += [5] - >>> dl + [6] - [1, 2, 5, 6] - >>> dl + (6,) - [1, 2, 5, 6] - >>> dl.insert(0, 0) - >>> dl - [0, 1, 2, 5] - >>> dl == [0, 1, 2, 5] - True - >>> dl == (0, 1, 2, 5) - True - >>> len(recwarn) - 1 - """ - - _warn = functools.partial( - warnings.warn, - "EntryPoints list interface is deprecated. Cast to list if needed.", - DeprecationWarning, - stacklevel=pypy_partial(2), - ) - - def _wrap_deprecated_method(method_name: str): # type: ignore - def wrapped(self, *args, **kwargs): - self._warn() - return getattr(super(), method_name)(*args, **kwargs) - - return wrapped - - for method_name in [ - '__setitem__', - '__delitem__', - 'append', - 'reverse', - 'extend', - 'pop', - 'remove', - '__iadd__', - 'insert', - 'sort', - ]: - locals()[method_name] = _wrap_deprecated_method(method_name) - - def __add__(self, other): - if not isinstance(other, tuple): - self._warn() - other = tuple(other) - return self.__class__(tuple(self) + other) - - def __eq__(self, other): - if not isinstance(other, tuple): - self._warn() - other = tuple(other) - - return tuple(self).__eq__(other) - - -class EntryPoints(DeprecatedList): +class EntryPoints(tuple): """ An immutable collection of selectable EntryPoint objects. """ __slots__ = () - def __getitem__(self, name): # -> EntryPoint: + def __getitem__(self, name: str) -> EntryPoint: # type: ignore[override] """ Get the EntryPoint in self matching name. """ - if isinstance(name, int): - warnings.warn( - "Accessing entry points by index is deprecated. " - "Cast to tuple if needed.", - DeprecationWarning, - stacklevel=2, - ) - return super().__getitem__(name) try: return next(iter(self.select(name=name))) except StopIteration: @@ -354,23 +299,19 @@ def select(self, **params): Select entry points from self that match the given parameters (typically group and/or name). """ - return EntryPoints(ep for ep in self if ep.matches(**params)) + return EntryPoints(ep for ep in self if _py39compat.ep_matches(ep, **params)) @property - def names(self): + def names(self) -> Set[str]: """ Return the set of all names of all entry points. """ return {ep.name for ep in self} @property - def groups(self): + def groups(self) -> Set[str]: """ Return the set of all groups of all entry points. - - For coverage while SelectableGroups is present. 
- >>> EntryPoints().groups - set() """ return {ep.group for ep in self} @@ -386,130 +327,58 @@ def _from_text(text): ) -class Deprecated: - """ - Compatibility add-in for mapping to indicate that - mapping behavior is deprecated. - - >>> recwarn = getfixture('recwarn') - >>> class DeprecatedDict(Deprecated, dict): pass - >>> dd = DeprecatedDict(foo='bar') - >>> dd.get('baz', None) - >>> dd['foo'] - 'bar' - >>> list(dd) - ['foo'] - >>> list(dd.keys()) - ['foo'] - >>> 'foo' in dd - True - >>> list(dd.values()) - ['bar'] - >>> len(recwarn) - 1 - """ - - _warn = functools.partial( - warnings.warn, - "SelectableGroups dict interface is deprecated. Use select.", - DeprecationWarning, - stacklevel=pypy_partial(2), - ) - - def __getitem__(self, name): - self._warn() - return super().__getitem__(name) - - def get(self, name, default=None): - self._warn() - return super().get(name, default) - - def __iter__(self): - self._warn() - return super().__iter__() - - def __contains__(self, *args): - self._warn() - return super().__contains__(*args) - - def keys(self): - self._warn() - return super().keys() - - def values(self): - self._warn() - return super().values() - - -class SelectableGroups(Deprecated, dict): - """ - A backward- and forward-compatible result from - entry_points that fully implements the dict interface. - """ - - @classmethod - def load(cls, eps): - by_group = operator.attrgetter('group') - ordered = sorted(eps, key=by_group) - grouped = itertools.groupby(ordered, by_group) - return cls((group, EntryPoints(eps)) for group, eps in grouped) - - @property - def _all(self): - """ - Reconstruct a list of all entrypoints from the groups. - """ - groups = super(Deprecated, self).values() - return EntryPoints(itertools.chain.from_iterable(groups)) - - @property - def groups(self): - return self._all.groups - - @property - def names(self): - """ - for coverage: - >>> SelectableGroups().names - set() - """ - return self._all.names - - def select(self, **params): - if not params: - return self - return self._all.select(**params) - - class PackagePath(pathlib.PurePosixPath): """A reference to a path in a package""" - def read_text(self, encoding='utf-8'): + hash: Optional["FileHash"] + size: int + dist: "Distribution" + + def read_text(self, encoding: str = 'utf-8') -> str: # type: ignore[override] with self.locate().open(encoding=encoding) as stream: return stream.read() - def read_binary(self): + def read_binary(self) -> bytes: with self.locate().open('rb') as stream: return stream.read() - def locate(self): + def locate(self) -> pathlib.Path: """Return a path-like object for this path""" return self.dist.locate_file(self) class FileHash: - def __init__(self, spec): + def __init__(self, spec: str) -> None: self.mode, _, self.value = spec.partition('=') - def __repr__(self): + def __repr__(self) -> str: return f'' -class Distribution: +class DeprecatedNonAbstract: + def __new__(cls, *args, **kwargs): + all_names = { + name for subclass in inspect.getmro(cls) for name in vars(subclass) + } + abstract = { + name + for name in all_names + if getattr(getattr(cls, name), '__isabstractmethod__', False) + } + if abstract: + warnings.warn( + f"Unimplemented abstract methods {abstract}", + DeprecationWarning, + stacklevel=2, + ) + return super().__new__(cls) + + +class Distribution(DeprecatedNonAbstract): """A Python distribution package.""" @abc.abstractmethod - def read_text(self, filename): + def read_text(self, filename) -> Optional[str]: """Attempt to load metadata file given by the name. 
:param filename: The name of the file in the distribution info. @@ -517,14 +386,14 @@ def read_text(self, filename): """ @abc.abstractmethod - def locate_file(self, path): + def locate_file(self, path: StrPath) -> pathlib.Path: """ Given a path to a file in this distribution, return a path to it. """ @classmethod - def from_name(cls, name): + def from_name(cls, name: str) -> "Distribution": """Return the Distribution for the given package name. :param name: The name of the distribution package to search for. @@ -532,17 +401,17 @@ def from_name(cls, name): package, if found. :raises PackageNotFoundError: When the named package's distribution metadata cannot be found. + :raises ValueError: When an invalid value is supplied for name. """ - for resolver in cls._discover_resolvers(): - dists = resolver(DistributionFinder.Context(name=name)) - dist = next(iter(dists), None) - if dist is not None: - return dist - else: + if not name: + raise ValueError("A distribution name is required.") + try: + return next(iter(cls.discover(name=name))) + except StopIteration: raise PackageNotFoundError(name) @classmethod - def discover(cls, **kwargs): + def discover(cls, **kwargs) -> Iterable["Distribution"]: """Return an iterable of Distribution objects for all packages. Pass a ``context`` or pass keyword arguments for constructing @@ -560,7 +429,7 @@ def discover(cls, **kwargs): ) @staticmethod - def at(path): + def at(path: StrPath) -> "Distribution": """Return a Distribution for the indicated metadata path :param path: a string or path-like object @@ -583,7 +452,7 @@ def metadata(self) -> _meta.PackageMetadata: The returned object will have keys that name the various bits of metadata. See PEP 566 for details. """ - text = ( + opt_text = ( self.read_text('METADATA') or self.read_text('PKG-INFO') # This last clause is here to support old egg-info files. Its @@ -591,10 +460,11 @@ def metadata(self) -> _meta.PackageMetadata: # (which points to the egg-info file) attribute unchanged. or self.read_text('') ) + text = cast(str, opt_text) return _adapters.Message(email.message_from_string(text)) @property - def name(self): + def name(self) -> str: """Return the 'Name' metadata for the distribution package.""" return self.metadata['Name'] @@ -604,23 +474,23 @@ def _normalized_name(self): return Prepared.normalize(self.name) @property - def version(self): + def version(self) -> str: """Return the 'Version' metadata for the distribution package.""" return self.metadata['Version'] @property - def entry_points(self): + def entry_points(self) -> EntryPoints: return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self) @property - def files(self): + def files(self) -> Optional[List[PackagePath]]: """Files in this distribution. :return: List of PackagePath for this distribution or None Result is `None` if the metadata file that enumerates files - (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is - missing. + (i.e. RECORD for dist-info, or installed-files.txt or + SOURCES.txt for egg-info) is missing. Result may be empty if the metadata exists but is empty. 
""" @@ -633,9 +503,19 @@ def make_file(name, hash=None, size_str=None): @pass_none def make_files(lines): - return list(starmap(make_file, csv.reader(lines))) + return starmap(make_file, csv.reader(lines)) - return make_files(self._read_files_distinfo() or self._read_files_egginfo()) + @pass_none + def skip_missing_files(package_paths): + return list(filter(lambda path: path.locate().exists(), package_paths)) + + return skip_missing_files( + make_files( + self._read_files_distinfo() + or self._read_files_egginfo_installed() + or self._read_files_egginfo_sources() + ) + ) def _read_files_distinfo(self): """ @@ -644,16 +524,51 @@ def _read_files_distinfo(self): text = self.read_text('RECORD') return text and text.splitlines() - def _read_files_egginfo(self): + def _read_files_egginfo_installed(self): + """ + Read installed-files.txt and return lines in a similar + CSV-parsable format as RECORD: each file must be placed + relative to the site-packages directory and must also be + quoted (since file names can contain literal commas). + + This file is written when the package is installed by pip, + but it might not be written for other installation methods. + Assume the file is accurate if it exists. + """ + text = self.read_text('installed-files.txt') + # Prepend the .egg-info/ subdir to the lines in this file. + # But this subdir is only available from PathDistribution's + # self._path. + subdir = getattr(self, '_path', None) + if not text or not subdir: + return + + paths = ( + (subdir / name) + .resolve() + .relative_to(self.locate_file('').resolve()) + .as_posix() + for name in text.splitlines() + ) + return map('"{}"'.format, paths) + + def _read_files_egginfo_sources(self): """ - SOURCES.txt might contain literal commas, so wrap each line - in quotes. + Read SOURCES.txt and return lines in a similar CSV-parsable + format as RECORD: each file name must be quoted (since it + might contain literal commas). + + Note that SOURCES.txt is not a reliable source for what + files are installed by a package. This file is generated + for a source archive, and the files that are present + there (e.g. setup.py) may not correctly reflect the files + that are present after the package has been installed. """ text = self.read_text('SOURCES.txt') return text and map('"{}"'.format, text.splitlines()) @property - def requires(self): + def requires(self) -> Optional[List[str]]: """Generated requirements specified for this Distribution""" reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs() return reqs and list(reqs) @@ -663,7 +578,7 @@ def _read_dist_info_reqs(self): def _read_egg_info_reqs(self): source = self.read_text('requires.txt') - return source and self._deps_from_requires_text(source) + return pass_none(self._deps_from_requires_text)(source) @classmethod def _deps_from_requires_text(cls, source): @@ -732,7 +647,7 @@ def __init__(self, **kwargs): vars(self).update(kwargs) @property - def path(self): + def path(self) -> List[str]: """ The sequence of directory path that a distribution finder should search. @@ -743,7 +658,7 @@ def path(self): return vars(self).get('path', sys.path) @abc.abstractmethod - def find_distributions(self, context=Context()): + def find_distributions(self, context=Context()) -> Iterable[Distribution]: """ Find distributions. 
@@ -767,7 +682,7 @@ def __new__(cls, root): return super().__new__(cls) def __init__(self, root): - self.root = str(root) + self.root = root def joinpath(self, child): return pathlib.Path(self.root, child) @@ -878,7 +793,9 @@ class MetadataPathFinder(NullFinder, DistributionFinder): of Python that do not have a PathFinder find_distributions(). """ - def find_distributions(self, context=DistributionFinder.Context()): + def find_distributions( + self, context=DistributionFinder.Context() + ) -> Iterable["PathDistribution"]: """ Find distributions. @@ -898,19 +815,19 @@ def _search_paths(cls, name, paths): path.search(prepared) for path in map(FastPath, paths) ) - def invalidate_caches(cls): + def invalidate_caches(cls) -> None: FastPath.__new__.cache_clear() class PathDistribution(Distribution): - def __init__(self, path: SimplePath): + def __init__(self, path: SimplePath) -> None: """Construct a distribution. :param path: SimplePath indicating the metadata directory. """ self._path = path - def read_text(self, filename): + def read_text(self, filename: StrPath) -> Optional[str]: with suppress( FileNotFoundError, IsADirectoryError, @@ -920,9 +837,11 @@ def read_text(self, filename): ): return self._path.joinpath(filename).read_text(encoding='utf-8') + return None + read_text.__doc__ = Distribution.read_text.__doc__ - def locate_file(self, path): + def locate_file(self, path: StrPath) -> pathlib.Path: return self._path.parent / path @property @@ -932,17 +851,30 @@ def _normalized_name(self): normalized name from the file system path. """ stem = os.path.basename(str(self._path)) - return self._name_from_stem(stem) or super()._normalized_name + return ( + pass_none(Prepared.normalize)(self._name_from_stem(stem)) + or super()._normalized_name + ) - def _name_from_stem(self, stem): - name, ext = os.path.splitext(stem) + @staticmethod + def _name_from_stem(stem): + """ + >>> PathDistribution._name_from_stem('foo-3.0.egg-info') + 'foo' + >>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info') + 'CherryPy' + >>> PathDistribution._name_from_stem('face.egg-info') + 'face' + >>> PathDistribution._name_from_stem('foo.bar') + """ + filename, ext = os.path.splitext(stem) if ext not in ('.dist-info', '.egg-info'): return - name, sep, rest = stem.partition('-') + name, sep, rest = filename.partition('-') return name -def distribution(distribution_name): +def distribution(distribution_name) -> Distribution: """Get the ``Distribution`` instance for the named package. :param distribution_name: The name of the distribution package as a string. @@ -951,7 +883,7 @@ def distribution(distribution_name): return Distribution.from_name(distribution_name) -def distributions(**kwargs): +def distributions(**kwargs) -> Iterable[Distribution]: """Get all ``Distribution`` instances in the current environment. :return: An iterable of ``Distribution`` instances. @@ -968,7 +900,7 @@ def metadata(distribution_name) -> _meta.PackageMetadata: return Distribution.from_name(distribution_name).metadata -def version(distribution_name): +def version(distribution_name) -> str: """Get the version string for the named package. :param distribution_name: The name of the distribution package to query. @@ -978,32 +910,31 @@ def version(distribution_name): return distribution(distribution_name).version -def entry_points(**params) -> Union[EntryPoints, SelectableGroups]: +_unique = functools.partial( + unique_everseen, + key=_py39compat.normalized_name, +) +""" +Wrapper for ``distributions`` to return unique distributions by name. 
+""" + + +def entry_points(**params) -> EntryPoints: """Return EntryPoint objects for all installed packages. Pass selection parameters (group or name) to filter the result to entry points matching those properties (see EntryPoints.select()). - For compatibility, returns ``SelectableGroups`` object unless - selection parameters are supplied. In the future, this function - will return ``EntryPoints`` instead of ``SelectableGroups`` - even when no selection parameters are supplied. - - For maximum future compatibility, pass selection parameters - or invoke ``.select`` with parameters on the result. - - :return: EntryPoints or SelectableGroups for all installed packages. + :return: EntryPoints for all installed packages. """ - norm_name = operator.attrgetter('_normalized_name') - unique = functools.partial(unique_everseen, key=norm_name) eps = itertools.chain.from_iterable( - dist.entry_points for dist in unique(distributions()) + dist.entry_points for dist in _unique(distributions()) ) - return SelectableGroups.load(eps).select(**params) + return EntryPoints(eps).select(**params) -def files(distribution_name): +def files(distribution_name) -> Optional[List[PackagePath]]: """Return a list of files for the named package. :param distribution_name: The name of the distribution package to query. @@ -1012,11 +943,11 @@ def files(distribution_name): return distribution(distribution_name).files -def requires(distribution_name): +def requires(distribution_name) -> Optional[List[str]]: """ Return a list of requirements for the named package. - :return: An iterator of requirements, suitable for + :return: An iterable of requirements, suitable for packaging.requirement.Requirement. """ return distribution(distribution_name).requires @@ -1044,8 +975,13 @@ def _top_level_declared(dist): def _top_level_inferred(dist): - return { - f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name + opt_names = { + f.parts[0] if len(f.parts) > 1 else inspect.getmodulename(f) for f in always_iterable(dist.files) - if f.suffix == ".py" } + + @pass_none + def importable_name(name): + return '.' not in name + + return filter(importable_name, opt_names) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_adapters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_adapters.py old mode 100755 new mode 100644 index aa460d3..e33cba5 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_adapters.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_adapters.py @@ -1,8 +1,20 @@ +import functools +import warnings import re import textwrap import email.message from ._text import FoldedCase +from ._compat import pypy_partial + + +# Do not remove prior to 2024-01-01 or Python 3.14 +_warn = functools.partial( + warnings.warn, + "Implicit None on return values is deprecated and will raise KeyErrors.", + DeprecationWarning, + stacklevel=pypy_partial(2), +) class Message(email.message.Message): @@ -39,6 +51,16 @@ def __init__(self, *args, **kwargs): def __iter__(self): return super().__iter__() + def __getitem__(self, item): + """ + Warn users that a ``KeyError`` can be expected when a + mising key is supplied. Ref python/importlib_metadata#371. 
+ """ + res = super().__getitem__(item) + if res is None: + _warn() + return res + def _repair_headers(self): def redent(value): "Correct for RFC822 indentation" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_collections.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_collections.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_compat.py old mode 100755 new mode 100644 index 8fe4e4e..638e779 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_compat.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_compat.py @@ -1,6 +1,9 @@ +import os import sys import platform +from typing import Union + __all__ = ['install', 'NullFinder', 'Protocol'] @@ -8,6 +11,7 @@ try: from typing import Protocol except ImportError: # pragma: no cover + # Python 3.7 compatibility from typing_extensions import Protocol # type: ignore @@ -69,3 +73,10 @@ def pypy_partial(val): """ is_pypy = platform.python_implementation() == 'PyPy' return val + is_pypy + + +if sys.version_info >= (3, 9): + StrPath = Union[str, os.PathLike[str]] +else: + # PathLike is only subscriptable at runtime in 3.9+ + StrPath = Union[str, "os.PathLike[str]"] # pragma: no cover diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_functools.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_functools.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_itertools.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_itertools.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_meta.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_meta.py old mode 100755 new mode 100644 index 37ee43e..0c7e879 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_meta.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_meta.py @@ -1,5 +1,5 @@ from ._compat import Protocol -from typing import Any, Dict, Iterator, List, TypeVar, Union +from typing import Any, Dict, Iterator, List, Optional, TypeVar, Union, overload _T = TypeVar("_T") @@ -18,7 +18,21 @@ def __getitem__(self, key: str) -> str: def __iter__(self) -> Iterator[str]: ... # pragma: no cover - def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]: + @overload + def get(self, name: str, failobj: None = None) -> Optional[str]: + ... # pragma: no cover + + @overload + def get(self, name: str, failobj: _T) -> Union[str, _T]: + ... # pragma: no cover + + # overload per python/importlib_metadata#435 + @overload + def get_all(self, name: str, failobj: None = None) -> Optional[List[Any]]: + ... # pragma: no cover + + @overload + def get_all(self, name: str, failobj: _T) -> Union[List[Any], _T]: """ Return all values associated with a possibly multi-valued key. """ @@ -30,18 +44,19 @@ def json(self) -> Dict[str, Union[str, List[str]]]: """ -class SimplePath(Protocol): +class SimplePath(Protocol[_T]): """ A minimal subset of pathlib.Path required by PathDistribution. 
""" - def joinpath(self) -> 'SimplePath': + def joinpath(self, other: Union[str, _T]) -> _T: ... # pragma: no cover - def __truediv__(self) -> 'SimplePath': + def __truediv__(self, other: Union[str, _T]) -> _T: ... # pragma: no cover - def parent(self) -> 'SimplePath': + @property + def parent(self) -> _T: ... # pragma: no cover def read_text(self) -> str: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_py39compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_py39compat.py new file mode 100644 index 0000000..cde4558 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_py39compat.py @@ -0,0 +1,35 @@ +""" +Compatibility layer with Python 3.8/3.9 +""" +from typing import TYPE_CHECKING, Any, Optional + +if TYPE_CHECKING: # pragma: no cover + # Prevent circular imports on runtime. + from . import Distribution, EntryPoint +else: + Distribution = EntryPoint = Any + + +def normalized_name(dist: Distribution) -> Optional[str]: + """ + Honor name normalization for distributions that don't provide ``_normalized_name``. + """ + try: + return dist._normalized_name + except AttributeError: + from . import Prepared # -> delay to prevent circular imports. + + return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name']) + + +def ep_matches(ep: EntryPoint, **params) -> bool: + """ + Workaround for ``EntryPoint`` objects without the ``matches`` method. + """ + try: + return ep.matches(**params) + except AttributeError: + from . import EntryPoint # -> delay to prevent circular imports. + + # Reconstruct the EntryPoint object to make sure it is compatible. + return EntryPoint(ep.name, ep.value, ep.group).matches(**params) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_text.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata/_text.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/idna-3.3.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
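
The hunks that follow vendor importlib_resources 5.12.0, starting with its wheel metadata. For orientation, a minimal sketch of the traversal API the backport provides, assuming a hypothetical package 'mypkg' that ships a 'data.txt' resource:

import importlib_resources

# files() returns a Traversable anchored at the package; joinpath() (or '/')
# walks to a bundled resource, and read_text() reads it in place.
resource = importlib_resources.files('mypkg') / 'data.txt'  # hypothetical names
print(resource.read_text(encoding='utf-8'))

# as_file() materializes the resource on the real file system for APIs that
# need a concrete os.PathLike, cleaning up afterwards when it had to copy.
with importlib_resources.as_file(resource) as path:
    print(path)
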
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/METADATA
new file mode 100644
index 0000000..038059d
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/METADATA
@@ -0,0 +1,104 @@
+Metadata-Version: 2.1
+Name: importlib-resources
+Version: 5.12.0
+Summary: Read resources from Python packages
+Home-page: https://github.com/python/importlib_resources
+Author: Barry Warsaw
+Author-email: barry@python.org
+Project-URL: Documentation, https://importlib-resources.readthedocs.io/
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: zipp (>=3.1.0) ; python_version < "3.10"
+Provides-Extra: docs
+Requires-Dist: sphinx (>=3.5) ; extra == 'docs'
+Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs'
+Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
+Requires-Dist: furo ; extra == 'docs'
+Requires-Dist: sphinx-lint ; extra == 'docs'
+Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs'
+Provides-Extra: testing
+Requires-Dist: pytest (>=6) ; extra == 'testing'
+Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
+Requires-Dist: flake8 (<5) ; extra == 'testing'
+Requires-Dist: pytest-cov ; extra == 'testing'
+Requires-Dist: pytest-enabler (>=1.3) ; extra == 'testing'
+Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-flake8 ; (python_version < "3.12") and extra == 'testing'
+
+.. image:: https://img.shields.io/pypi/v/importlib_resources.svg
+   :target: https://pypi.org/project/importlib_resources
+
+.. image:: https://img.shields.io/pypi/pyversions/importlib_resources.svg
+
+.. image:: https://github.com/python/importlib_resources/workflows/tests/badge.svg
+   :target: https://github.com/python/importlib_resources/actions?query=workflow%3A%22tests%22
+   :alt: tests
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+   :target: https://github.com/psf/black
+   :alt: Code style: Black
+
+.. image:: https://readthedocs.org/projects/importlib-resources/badge/?version=latest
+   :target: https://importlib-resources.readthedocs.io/en/latest/?badge=latest
+
+.. image:: https://img.shields.io/badge/skeleton-2023-informational
+   :target: https://blog.jaraco.com/skeleton
+
+.. image:: https://tidelift.com/badges/package/pypi/importlib-resources
+   :target: https://tidelift.com/subscription/pkg/pypi-importlib-resources?utm_source=pypi-importlib-resources&utm_medium=readme
+
+``importlib_resources`` is a backport of Python standard library
+`importlib.resources
+<https://docs.python.org/3/library/importlib.resources.html>`_
+module for older Pythons.
+
+The key goal of this module is to replace parts of `pkg_resources
+<https://setuptools.pypa.io/en/latest/pkg_resources.html>`_ with a
+solution in Python's stdlib that relies on well-defined APIs. This makes
+reading resources included in packages easier, with more stable and consistent
+semantics.
+
+Compatibility
+=============
+
+New features are introduced in this third-party library and later merged
+into CPython. The following table indicates which versions of this library
+were contributed to different versions in the standard library:
+
+.. list-table::
+   :header-rows: 1
+
+   * - importlib_resources
+     - stdlib
+   * - 5.9
+     - 3.12
+   * - 5.7
+     - 3.11
+   * - 5.0
+     - 3.10
+   * - 1.3
+     - 3.9
+   * - 0.5 (?)
+     - 3.7
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more <https://tidelift.com/subscription/pkg/pypi-importlib-resources?utm_source=pypi-importlib-resources&utm_medium=referral&utm_campaign=github>`_.
+
+Security Contact
+================
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/RECORD
new file mode 100644
index 0000000..065a3e4
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/RECORD
@@ -0,0 +1,49 @@
+importlib_resources-5.12.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+importlib_resources-5.12.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+importlib_resources-5.12.0.dist-info/METADATA,sha256=uEY10nhKI-5nXImnXgsNt7BDYf7u2Qw8-BO2K2hmlJA,4111
+importlib_resources-5.12.0.dist-info/RECORD,,
+importlib_resources-5.12.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+importlib_resources-5.12.0.dist-info/top_level.txt,sha256=fHIjHU1GZwAjvcydpmUnUrTnbvdiWjG4OEVZK8by0TQ,20
+importlib_resources/__init__.py,sha256=evPm12kLgYqTm-pbzm60bOuumumT8IpBNWFp0uMyrzE,506
+importlib_resources/_adapters.py,sha256=vprJGbUeHbajX6XCuMP6J3lMrqCi-P_MTlziJUR7jfk,4482
+importlib_resources/_common.py,sha256=jSC4xfLdcMNbtbWHtpzbFkNa0W7kvf__nsYn14C_AEU,5457
+importlib_resources/_compat.py,sha256=4oDJPpo63eH_3l5BkBHmkjAQW4HGs5qvYd2-ziLA_ck,2935
+importlib_resources/_itertools.py,sha256=eDisV6RqiNZOogLSXf6LOGHOYc79FGgPrKNLzFLmCrU,1277
+importlib_resources/_legacy.py,sha256=0TKdZixxLWA-xwtAZw4HcpqJmj4Xprx1Zkcty0gTRZY,3481
+importlib_resources/abc.py,sha256=Icr2IJ2QtH7vvAB9vC5WRJ9KBoaDyJa7KUs8McuROzo,5140
+importlib_resources/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/readers.py,sha256=i80n49L2rBAtlB9bU0zAeQHiEXxcdP99-pWR6ED-ypY,4312
+importlib_resources/simple.py,sha256=0__2TQBTQoqkajYmNPt1HxERcReAT6boVKJA328pr04,2576
+importlib_resources/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/_compat.py,sha256=YTSB0U1R9oADnh6GrQcOCgojxcF_N6H1LklymEWf9SQ,708
+importlib_resources/tests/_path.py,sha256=nkv3ek7D1U898v921rYbldDCtKri2oyYOi3EJqGjEGU,1289
+importlib_resources/tests/data01/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/data01/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4
+importlib_resources/tests/data01/subdirectory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/data01/subdirectory/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4
+importlib_resources/tests/data01/utf-16.file,sha256=t5q9qhxX0rYqItBOM8D3ylwG-RHrnOYteTLtQr6sF7g,44
+importlib_resources/tests/data01/utf-8.file,sha256=kwWgYG4yQ-ZF2X_WA66EjYPmxJRn-w8aSOiS9e8tKYY,20
+importlib_resources/tests/data02/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +importlib_resources/tests/data02/one/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +importlib_resources/tests/data02/one/resource1.txt,sha256=10flKac7c-XXFzJ3t-AB5MJjlBy__dSZvPE_dOm2q6U,13 +importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt,sha256=jnrBBztxYrtQck7cmVnc4xQVO4-agzAZDGSFkAWtlFw,10 +importlib_resources/tests/data02/two/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +importlib_resources/tests/data02/two/resource2.txt,sha256=lt2jbN3TMn9QiFKM832X39bU_62UptDdUkoYzkvEbl0,13 +importlib_resources/tests/namespacedata01/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4 +importlib_resources/tests/namespacedata01/utf-16.file,sha256=t5q9qhxX0rYqItBOM8D3ylwG-RHrnOYteTLtQr6sF7g,44 +importlib_resources/tests/namespacedata01/utf-8.file,sha256=kwWgYG4yQ-ZF2X_WA66EjYPmxJRn-w8aSOiS9e8tKYY,20 +importlib_resources/tests/test_compatibilty_files.py,sha256=95N_R7aik8cvnE6sBJpsxmP0K5plOWRIJDgbalD-Hpw,3314 +importlib_resources/tests/test_contents.py,sha256=V1Xfk3lqTDdvUsZuV18Kndf0CT_tkM2oEIwk9Vv0rhg,968 +importlib_resources/tests/test_custom.py,sha256=jVYg9idEVdUN6idHUfDDlZ-zDWl56qYNbj5QrcZO76Y,1124 +importlib_resources/tests/test_files.py,sha256=W5XoBWSTr84Ke15UtjqWLet2iUDUyJfQxbST4PDlj2w,3283 +importlib_resources/tests/test_open.py,sha256=9qvdC6Eu2Kn3mh3xDR5HUEQoePSKIecTxU4vnH9veO8,2671 +importlib_resources/tests/test_path.py,sha256=XR5RI7_zndI_Nqw9eHU1tDmSGIo29N1GP8INodPc584,2142 +importlib_resources/tests/test_read.py,sha256=BYdRqZEEJE17NHPArpZW9VsIwMlna1BpHyWkgCvEKWk,2512 +importlib_resources/tests/test_reader.py,sha256=YS1RHDzSIo7Dy3AhoK7sY-cFWIFnfkMNfQR3xlXsgio,4990 +importlib_resources/tests/test_resource.py,sha256=cPHz7VLwq6bFznZ-JDYE3f_4VJthQztRHKhiA9SriT0,8270 +importlib_resources/tests/update-zips.py,sha256=x-SrO5v87iLLUMXyefxDwAd3imAs_slI94sLWvJ6N40,1417 +importlib_resources/tests/util.py,sha256=TQz12vSkHNjGlF3hB0OR4kx2sCR-xcj0wI2esDyHR9I,5001 +importlib_resources/tests/zipdata01/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +importlib_resources/tests/zipdata01/ziptestdata.zip,sha256=z5Of4dsv3T0t-46B0MsVhxlhsPGMz28aUhJDWpj3_oY,876 +importlib_resources/tests/zipdata02/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +importlib_resources/tests/zipdata02/ziptestdata.zip,sha256=ydI-_j-xgQ7tDxqBp9cjOqXBGxUp6ZBbwVJu6Xj-nrY,698 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/WHEEL similarity index 65% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/WHEEL index becc9a6..57e3d84 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) +Generator: bdist_wheel (0.38.4) Root-Is-Purelib: true Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/top_level.txt 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/top_level.txt new file mode 100644 index 0000000..58ad1bd --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources-5.12.0.dist-info/top_level.txt @@ -0,0 +1 @@ +importlib_resources diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_adapters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_adapters.py old mode 100755 new mode 100644 similarity index 97% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_adapters.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_adapters.py index ea363d8..50688fb --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_adapters.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_adapters.py @@ -34,9 +34,7 @@ def _io_wrapper(file, mode='r', *args, **kwargs): return TextIOWrapper(file, *args, **kwargs) elif mode == 'rb': return file - raise ValueError( - "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode) - ) + raise ValueError(f"Invalid mode value '{mode}', only 'r' and 'rb' are supported") class CompatibilityFiles: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_common.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_common.py new file mode 100644 index 0000000..3c6de1c --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_common.py @@ -0,0 +1,207 @@ +import os +import pathlib +import tempfile +import functools +import contextlib +import types +import importlib +import inspect +import warnings +import itertools + +from typing import Union, Optional, cast +from .abc import ResourceReader, Traversable + +from ._compat import wrap_spec + +Package = Union[types.ModuleType, str] +Anchor = Package + + +def package_to_anchor(func): + """ + Replace 'package' parameter as 'anchor' and warn about the change. + + Other errors should fall through. + + >>> files('a', 'b') + Traceback (most recent call last): + TypeError: files() takes from 0 to 1 positional arguments but 2 were given + """ + undefined = object() + + @functools.wraps(func) + def wrapper(anchor=undefined, package=undefined): + if package is not undefined: + if anchor is not undefined: + return func(anchor, package) + warnings.warn( + "First parameter to files is renamed to 'anchor'", + DeprecationWarning, + stacklevel=2, + ) + return func(package) + elif anchor is undefined: + return func() + return func(anchor) + + return wrapper + + +@package_to_anchor +def files(anchor: Optional[Anchor] = None) -> Traversable: + """ + Get a Traversable resource for an anchor. 
+ """ + return from_package(resolve(anchor)) + + +def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: + """ + Return the package's loader if it's a ResourceReader. + """ + # We can't use + # a issubclass() check here because apparently abc.'s __subclasscheck__() + # hook wants to create a weak reference to the object, but + # zipimport.zipimporter does not support weak references, resulting in a + # TypeError. That seems terrible. + spec = package.__spec__ + reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore + if reader is None: + return None + return reader(spec.name) # type: ignore + + +@functools.singledispatch +def resolve(cand: Optional[Anchor]) -> types.ModuleType: + return cast(types.ModuleType, cand) + + +@resolve.register +def _(cand: str) -> types.ModuleType: + return importlib.import_module(cand) + + +@resolve.register +def _(cand: None) -> types.ModuleType: + return resolve(_infer_caller().f_globals['__name__']) + + +def _infer_caller(): + """ + Walk the stack and find the frame of the first caller not in this module. + """ + + def is_this_file(frame_info): + return frame_info.filename == __file__ + + def is_wrapper(frame_info): + return frame_info.function == 'wrapper' + + not_this_file = itertools.filterfalse(is_this_file, inspect.stack()) + # also exclude 'wrapper' due to singledispatch in the call stack + callers = itertools.filterfalse(is_wrapper, not_this_file) + return next(callers).frame + + +def from_package(package: types.ModuleType): + """ + Return a Traversable object for the given package. + + """ + spec = wrap_spec(package) + reader = spec.loader.get_resource_reader(spec.name) + return reader.files() + + +@contextlib.contextmanager +def _tempfile( + reader, + suffix='', + # gh-93353: Keep a reference to call os.remove() in late Python + # finalization. + *, + _os_remove=os.remove, +): + # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' + # blocks due to the need to close the temporary file to work on Windows + # properly. + fd, raw_path = tempfile.mkstemp(suffix=suffix) + try: + try: + os.write(fd, reader()) + finally: + os.close(fd) + del reader + yield pathlib.Path(raw_path) + finally: + try: + _os_remove(raw_path) + except FileNotFoundError: + pass + + +def _temp_file(path): + return _tempfile(path.read_bytes, suffix=path.name) + + +def _is_present_dir(path: Traversable) -> bool: + """ + Some Traversables implement ``is_dir()`` to raise an + exception (i.e. ``FileNotFoundError``) when the + directory doesn't exist. This function wraps that call + to always return a boolean and only return True + if there's a dir and it exists. + """ + with contextlib.suppress(FileNotFoundError): + return path.is_dir() + return False + + +@functools.singledispatch +def as_file(path): + """ + Given a Traversable object, return that object as a + path on the local file system in a context manager. + """ + return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) + + +@as_file.register(pathlib.Path) +@contextlib.contextmanager +def _(path): + """ + Degenerate behavior for pathlib.Path objects. + """ + yield path + + +@contextlib.contextmanager +def _temp_path(dir: tempfile.TemporaryDirectory): + """ + Wrap tempfile.TemporyDirectory to return a pathlib object. + """ + with dir as result: + yield pathlib.Path(result) + + +@contextlib.contextmanager +def _temp_dir(path): + """ + Given a traversable dir, recursively replicate the whole tree + to the file system in a context manager. 
+ """ + assert path.is_dir() + with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: + yield _write_contents(temp_dir, path) + + +def _write_contents(target, source): + child = target.joinpath(source.name) + if source.is_dir(): + child.mkdir() + for item in source.iterdir(): + _write_contents(child, item) + else: + child.write_bytes(source.read_bytes()) + return child diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_compat.py old mode 100755 new mode 100644 similarity index 88% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_compat.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_compat.py index cb9fc82..a93a882 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_compat.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_compat.py @@ -1,14 +1,17 @@ # flake8: noqa import abc +import os import sys import pathlib from contextlib import suppress +from typing import Union + if sys.version_info >= (3, 10): from zipfile import Path as ZipPath # type: ignore else: - from ..zipp import Path as ZipPath # type: ignore + from zipp import Path as ZipPath # type: ignore try: @@ -69,9 +72,6 @@ def _file_reader(spec): return readers.FileReader(self) return ( - # native reader if it supplies 'files' - _native_reader(self.spec) - or # local ZipReader if a zip module _zip_reader(self.spec) or @@ -80,8 +80,12 @@ def _file_reader(spec): or # local FileReader _file_reader(self.spec) + or + # native reader if it supplies 'files' + _native_reader(self.spec) + or # fallback - adapt the spec ResourceReader to TraversableReader - or _adapters.CompatibilityFiles(self.spec) + _adapters.CompatibilityFiles(self.spec) ) @@ -96,3 +100,10 @@ def wrap_spec(package): from . import _adapters return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) + + +if sys.version_info >= (3, 9): + StrPath = Union[str, os.PathLike[str]] +else: + # PathLike is only subscriptable at runtime in 3.9+ + StrPath = Union[str, "os.PathLike[str]"] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_itertools.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_itertools.py new file mode 100644 index 0000000..7b775ef --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_itertools.py @@ -0,0 +1,38 @@ +# from more_itertools 9.0 +def only(iterable, default=None, too_long=None): + """If *iterable* has only one item, return it. + If it has zero items, return *default*. + If it has more than one item, raise the exception given by *too_long*, + which is ``ValueError`` by default. + >>> only([], default='missing') + 'missing' + >>> only([1]) + 1 + >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + ValueError: Expected exactly one item in iterable, but got 1, 2, + and perhaps more.' + >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + TypeError + Note that :func:`only` attempts to advance *iterable* twice to ensure there + is only one item. 
See :func:`spy` or :func:`peekable` to check + iterable contents less destructively. + """ + it = iter(iterable) + first_value = next(it, default) + + try: + second_value = next(it) + except StopIteration: + pass + else: + msg = ( + 'Expected exactly one item in iterable, but got {!r}, {!r}, ' + 'and perhaps more.'.format(first_value, second_value) + ) + raise too_long or ValueError(msg) + + return first_value diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_legacy.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_legacy.py old mode 100755 new mode 100644 similarity index 98% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_legacy.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_legacy.py index 1d5d3f1..b1ea810 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_legacy.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/_legacy.py @@ -27,8 +27,7 @@ def wrapper(*args, **kwargs): return wrapper -def normalize_path(path): - # type: (Any) -> str +def normalize_path(path: Any) -> str: """Normalize a path by ensuring it is a string. If the resulting string contains path separators, an exception is raised. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/abc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/abc.py old mode 100755 new mode 100644 similarity index 66% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/abc.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/abc.py index d39dc1a..23b6aea --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/abc.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/abc.py @@ -1,7 +1,13 @@ import abc -from typing import BinaryIO, Iterable, Text +import io +import itertools +import pathlib +from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional -from ._compat import runtime_checkable, Protocol +from ._compat import runtime_checkable, Protocol, StrPath + + +__all__ = ["ResourceReader", "Traversable", "TraversableResources"] class ResourceReader(metaclass=abc.ABCMeta): @@ -46,27 +52,34 @@ def contents(self) -> Iterable[str]: raise FileNotFoundError +class TraversalError(Exception): + pass + + @runtime_checkable class Traversable(Protocol): """ An object with a subset of pathlib.Path methods suitable for traversing directories and opening files. + + Any exceptions that occur when accessing the backing resource + may propagate unaltered. 
""" @abc.abstractmethod - def iterdir(self): + def iterdir(self) -> Iterator["Traversable"]: """ Yield Traversable objects in self """ - def read_bytes(self): + def read_bytes(self) -> bytes: """ Read contents of self as bytes """ with self.open('rb') as strm: return strm.read() - def read_text(self, encoding=None): + def read_text(self, encoding: Optional[str] = None) -> str: """ Read contents of self as text """ @@ -85,13 +98,32 @@ def is_file(self) -> bool: Return True if self is a file """ - @abc.abstractmethod - def joinpath(self, child): + def joinpath(self, *descendants: StrPath) -> "Traversable": """ - Return Traversable child in self + Return Traversable resolved with any descendants applied. + + Each descendant should be a path segment relative to self + and each may contain multiple levels separated by + ``posixpath.sep`` (``/``). """ + if not descendants: + return self + names = itertools.chain.from_iterable( + path.parts for path in map(pathlib.PurePosixPath, descendants) + ) + target = next(names) + matches = ( + traversable for traversable in self.iterdir() if traversable.name == target + ) + try: + match = next(matches) + except StopIteration: + raise TraversalError( + "Target not found during traversal.", target, list(names) + ) + return match.joinpath(*names) - def __truediv__(self, child): + def __truediv__(self, child: StrPath) -> "Traversable": """ Return Traversable child in self """ @@ -107,7 +139,8 @@ def open(self, mode='r', *args, **kwargs): accepted by io.TextIOWrapper. """ - @abc.abstractproperty + @property + @abc.abstractmethod def name(self) -> str: """ The base name of this object without any parent references. @@ -121,17 +154,17 @@ class TraversableResources(ResourceReader): """ @abc.abstractmethod - def files(self): + def files(self) -> "Traversable": """Return a Traversable object for the loaded package.""" - def open_resource(self, resource): + def open_resource(self, resource: StrPath) -> io.BufferedReader: return self.files().joinpath(resource).open('rb') - def resource_path(self, resource): + def resource_path(self, resource: Any) -> NoReturn: raise FileNotFoundError(resource) - def is_resource(self, path): + def is_resource(self, path: StrPath) -> bool: return self.files().joinpath(path).is_file() - def contents(self): + def contents(self) -> Iterator[str]: return (item.name for item in self.files().iterdir()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/py.typed old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/http/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/py.typed diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/readers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/readers.py old mode 100755 new mode 100644 similarity index 68% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/readers.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/readers.py index f1190ca..51d030a --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/readers.py +++ 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/readers.py @@ -1,10 +1,11 @@ import collections +import itertools import pathlib import operator from . import abc -from ._itertools import unique_everseen +from ._itertools import only from ._compat import ZipPath @@ -41,8 +42,10 @@ def open_resource(self, resource): raise FileNotFoundError(exc.args[0]) def is_resource(self, path): - # workaround for `zipfile.Path.is_file` returning true - # for non-existent paths. + """ + Workaround for `zipfile.Path.is_file` returning true + for non-existent paths. + """ target = self.files().joinpath(path) return target.is_file() and target.exists() @@ -67,8 +70,10 @@ def __init__(self, *paths): raise NotADirectoryError('MultiplexedPath only supports directories') def iterdir(self): - files = (file for path in self._paths for file in path.iterdir()) - return unique_everseen(files, key=operator.attrgetter('name')) + children = (child for path in self._paths for child in path.iterdir()) + by_name = operator.attrgetter('name') + groups = itertools.groupby(sorted(children, key=by_name), key=by_name) + return map(self._follow, (locs for name, locs in groups)) def read_bytes(self): raise FileNotFoundError(f'{self} is not a file') @@ -82,15 +87,32 @@ def is_dir(self): def is_file(self): return False - def joinpath(self, child): - # first try to find child in current paths - for file in self.iterdir(): - if file.name == child: - return file - # if it does not exist, construct it with the first path - return self._paths[0] / child + def joinpath(self, *descendants): + try: + return super().joinpath(*descendants) + except abc.TraversalError: + # One of the paths did not resolve (a directory does not exist). + # Just return something that will not exist. + return self._paths[0].joinpath(*descendants) + + @classmethod + def _follow(cls, children): + """ + Construct a MultiplexedPath if needed. - __truediv__ = joinpath + If children contains a sole element, return it. + Otherwise, return a MultiplexedPath of the items. + Unless one of the items is not a Directory, then return the first. + """ + subdirs, one_dir, one_file = itertools.tee(children, 3) + + try: + return only(one_dir) + except ValueError: + try: + return cls(*subdirs) + except NotADirectoryError: + return next(one_file) def open(self, *args, **kwargs): raise FileNotFoundError(f'{self} is not a file') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/simple.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/simple.py old mode 100755 new mode 100644 similarity index 79% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/simple.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/simple.py index da073cb..7770c92 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/simple.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/simple.py @@ -16,31 +16,28 @@ class SimpleReader(abc.ABC): provider. """ - @abc.abstractproperty - def package(self): - # type: () -> str + @property + @abc.abstractmethod + def package(self) -> str: """ The name of the package for which this reader loads resources. 
""" @abc.abstractmethod - def children(self): - # type: () -> List['SimpleReader'] + def children(self) -> List['SimpleReader']: """ Obtain an iterable of SimpleReader for available child containers (e.g. directories). """ @abc.abstractmethod - def resources(self): - # type: () -> List[str] + def resources(self) -> List[str]: """ Obtain available named resources for this virtual package. """ @abc.abstractmethod - def open_binary(self, resource): - # type: (str) -> BinaryIO + def open_binary(self, resource: str) -> BinaryIO: """ Obtain a File-like for a named resource. """ @@ -50,39 +47,12 @@ def name(self): return self.package.split('.')[-1] -class ResourceHandle(Traversable): - """ - Handle to a named resource in a ResourceReader. - """ - - def __init__(self, parent, name): - # type: (ResourceContainer, str) -> None - self.parent = parent - self.name = name # type: ignore - - def is_file(self): - return True - - def is_dir(self): - return False - - def open(self, mode='r', *args, **kwargs): - stream = self.parent.reader.open_binary(self.name) - if 'b' not in mode: - stream = io.TextIOWrapper(*args, **kwargs) - return stream - - def joinpath(self, name): - raise RuntimeError("Cannot traverse into a resource") - - class ResourceContainer(Traversable): """ Traversable container for a package's resources via its reader. """ - def __init__(self, reader): - # type: (SimpleReader) -> None + def __init__(self, reader: SimpleReader): self.reader = reader def is_dir(self): @@ -99,10 +69,30 @@ def iterdir(self): def open(self, *args, **kwargs): raise IsADirectoryError() + +class ResourceHandle(Traversable): + """ + Handle to a named resource in a ResourceReader. + """ + + def __init__(self, parent: ResourceContainer, name: str): + self.parent = parent + self.name = name # type: ignore + + def is_file(self): + return True + + def is_dir(self): + return False + + def open(self, mode='r', *args, **kwargs): + stream = self.parent.reader.open_binary(self.name) + if 'b' not in mode: + stream = io.TextIOWrapper(*args, **kwargs) + return stream + def joinpath(self, name): - return next( - traversable for traversable in self.iterdir() if traversable.name == name - ) + raise RuntimeError("Cannot traverse into a resource") class TraversableReader(TraversableResources, SimpleReader): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/backports/urllib/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_compat.py new file mode 100644 index 0000000..e7bf06d --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_compat.py @@ -0,0 +1,32 @@ +import os + + +try: + from test.support import import_helper # type: ignore +except ImportError: + # Python 3.9 and earlier + class import_helper: # type: ignore + from test.support import ( + modules_setup, + modules_cleanup, + DirsOnSysPath, + CleanImport, + ) + + +try: + from test.support import os_helper # type: ignore +except ImportError: + # Python 
3.9 compat + class os_helper: # type:ignore + from test.support import temp_dir + + +try: + # Python 3.10 + from test.support.os_helper import unlink +except ImportError: + from test.support import unlink as _unlink + + def unlink(target): + return _unlink(os.fspath(target)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_path.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_path.py new file mode 100644 index 0000000..1f97c96 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/_path.py @@ -0,0 +1,56 @@ +import pathlib +import functools + +from typing import Dict, Union + + +#### +# from jaraco.path 3.4.1 + +FilesSpec = Dict[str, Union[str, bytes, 'FilesSpec']] # type: ignore + + +def build(spec: FilesSpec, prefix=pathlib.Path()): + """ + Build a set of files/directories, as described by the spec. + + Each key represents a pathname, and the value represents + the content. Content may be a nested directory. + + >>> spec = { + ... 'README.txt': "A README file", + ... "foo": { + ... "__init__.py": "", + ... "bar": { + ... "__init__.py": "", + ... }, + ... "baz.py": "# Some code", + ... } + ... } + >>> target = getfixture('tmp_path') + >>> build(spec, target) + >>> target.joinpath('foo/baz.py').read_text(encoding='utf-8') + '# Some code' + """ + for name, contents in spec.items(): + create(contents, pathlib.Path(prefix) / name) + + +@functools.singledispatch +def create(content: Union[str, bytes, FilesSpec], path): + path.mkdir(exist_ok=True) + build(content, prefix=path) # type: ignore + + +@create.register +def _(content: bytes, path): + path.write_bytes(content) + + +@create.register +def _(content: str, path): + path.write_text(content, encoding='utf-8') + + +# end from jaraco.path +#### diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/moves/xmlrpc/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/binary.file b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/binary.file new file mode 100644 index 0000000000000000000000000000000000000000..eaf36c1daccfdf325514461cd1a2ffbc139b5464 GIT binary patch literal 4 LcmZQzWMT#Y01f~L literal 0 HcmV?d00001 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/tests/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/subdirectory/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future/tests/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/subdirectory/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/subdirectory/binary.file 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/subdirectory/binary.file new file mode 100644 index 0000000000000000000000000000000000000000..eaf36c1daccfdf325514461cd1a2ffbc139b5464 GIT binary patch literal 4 LcmZQzWMT#Y01f~L literal 0 HcmV?d00001 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/utf-16.file b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/utf-16.file new file mode 100644 index 0000000000000000000000000000000000000000..2cb772295ef4b480a8d83725bd5006a0236d8f68 GIT binary patch literal 44 ucmezW&x0YAAqNQa8FUyF7(y9B7~B|i84MZBfV^^`Xc15@g+Y;liva-T)Ce>H literal 0 HcmV?d00001 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/utf-8.file b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/utf-8.file new file mode 100644 index 0000000..1c0132a --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data01/utf-8.file @@ -0,0 +1 @@ +Hello, UTF-8 world! diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/one/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/__init__.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/one/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/one/resource1.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/one/resource1.txt new file mode 100644 index 0000000..61a813e --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/one/resource1.txt @@ -0,0 +1 @@ +one resource diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt new file mode 100644 index 0000000..48f587a --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt @@ -0,0 +1 @@ +a resource \ No newline at end of file diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/two/__init__.py similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/REQUESTED 
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/two/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/two/resource2.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/two/resource2.txt new file mode 100644 index 0000000..a80ce46 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/data02/two/resource2.txt @@ -0,0 +1 @@ +two resource diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/binary.file b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/binary.file new file mode 100644 index 0000000000000000000000000000000000000000..eaf36c1daccfdf325514461cd1a2ffbc139b5464 GIT binary patch literal 4 LcmZQzWMT#Y01f~L literal 0 HcmV?d00001 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/utf-16.file b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/utf-16.file new file mode 100644 index 0000000000000000000000000000000000000000..2cb772295ef4b480a8d83725bd5006a0236d8f68 GIT binary patch literal 44 ucmezW&x0YAAqNQa8FUyF7(y9B7~B|i84MZBfV^^`Xc15@g+Y;liva-T)Ce>H literal 0 HcmV?d00001 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/utf-8.file b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/utf-8.file new file mode 100644 index 0000000..1c0132a --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/namespacedata01/utf-8.file @@ -0,0 +1 @@ +Hello, UTF-8 world! diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_compatibilty_files.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_compatibilty_files.py new file mode 100644 index 0000000..13ad0df --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_compatibilty_files.py @@ -0,0 +1,104 @@ +import io +import unittest + +import importlib_resources as resources + +from importlib_resources._adapters import ( + CompatibilityFiles, + wrap_spec, +) + +from . 
import util + + +class CompatibilityFilesTests(unittest.TestCase): + @property + def package(self): + bytes_data = io.BytesIO(b'Hello, world!') + return util.create_package( + file=bytes_data, + path='some_path', + contents=('a', 'b', 'c'), + ) + + @property + def files(self): + return resources.files(self.package) + + def test_spec_path_iter(self): + self.assertEqual( + sorted(path.name for path in self.files.iterdir()), + ['a', 'b', 'c'], + ) + + def test_child_path_iter(self): + self.assertEqual(list((self.files / 'a').iterdir()), []) + + def test_orphan_path_iter(self): + self.assertEqual(list((self.files / 'a' / 'a').iterdir()), []) + self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), []) + + def test_spec_path_is(self): + self.assertFalse(self.files.is_file()) + self.assertFalse(self.files.is_dir()) + + def test_child_path_is(self): + self.assertTrue((self.files / 'a').is_file()) + self.assertFalse((self.files / 'a').is_dir()) + + def test_orphan_path_is(self): + self.assertFalse((self.files / 'a' / 'a').is_file()) + self.assertFalse((self.files / 'a' / 'a').is_dir()) + self.assertFalse((self.files / 'a' / 'a' / 'a').is_file()) + self.assertFalse((self.files / 'a' / 'a' / 'a').is_dir()) + + def test_spec_path_name(self): + self.assertEqual(self.files.name, 'testingpackage') + + def test_child_path_name(self): + self.assertEqual((self.files / 'a').name, 'a') + + def test_orphan_path_name(self): + self.assertEqual((self.files / 'a' / 'b').name, 'b') + self.assertEqual((self.files / 'a' / 'b' / 'c').name, 'c') + + def test_spec_path_open(self): + self.assertEqual(self.files.read_bytes(), b'Hello, world!') + self.assertEqual(self.files.read_text(encoding='utf-8'), 'Hello, world!') + + def test_child_path_open(self): + self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!') + self.assertEqual( + (self.files / 'a').read_text(encoding='utf-8'), 'Hello, world!' + ) + + def test_orphan_path_open(self): + with self.assertRaises(FileNotFoundError): + (self.files / 'a' / 'b').read_bytes() + with self.assertRaises(FileNotFoundError): + (self.files / 'a' / 'b' / 'c').read_bytes() + + def test_open_invalid_mode(self): + with self.assertRaises(ValueError): + self.files.open('0') + + def test_orphan_path_invalid(self): + with self.assertRaises(ValueError): + CompatibilityFiles.OrphanPath() + + def test_wrap_spec(self): + spec = wrap_spec(self.package) + self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles) + + +class CompatibilityFilesNoReaderTests(unittest.TestCase): + @property + def package(self): + return util.create_package_from_loader(None) + + @property + def files(self): + return resources.files(self.package) + + def test_spec_path_joinpath(self): + self.assertIsInstance(self.files / 'a', CompatibilityFiles.OrphanPath) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_contents.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_contents.py new file mode 100644 index 0000000..525568e --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_contents.py @@ -0,0 +1,43 @@ +import unittest +import importlib_resources as resources + +from . import data01 +from . 
import util + + +class ContentsTests: + expected = { + '__init__.py', + 'binary.file', + 'subdirectory', + 'utf-16.file', + 'utf-8.file', + } + + def test_contents(self): + contents = {path.name for path in resources.files(self.data).iterdir()} + assert self.expected <= contents + + +class ContentsDiskTests(ContentsTests, unittest.TestCase): + def setUp(self): + self.data = data01 + + +class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase): + pass + + +class ContentsNamespaceTests(ContentsTests, unittest.TestCase): + expected = { + # no __init__ because of namespace design + # no subdirectory as incidental difference in fixture + 'binary.file', + 'utf-16.file', + 'utf-8.file', + } + + def setUp(self): + from . import namespacedata01 + + self.data = namespacedata01 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_custom.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_custom.py new file mode 100644 index 0000000..e85ddd6 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_custom.py @@ -0,0 +1,45 @@ +import unittest +import contextlib +import pathlib + +import importlib_resources as resources +from ..abc import TraversableResources, ResourceReader +from . import util +from ._compat import os_helper + + +class SimpleLoader: + """ + A simple loader that only implements a resource reader. + """ + + def __init__(self, reader: ResourceReader): + self.reader = reader + + def get_resource_reader(self, package): + return self.reader + + +class MagicResources(TraversableResources): + """ + Magically returns the resources at path. + """ + + def __init__(self, path: pathlib.Path): + self.path = path + + def files(self): + return self.path + + +class CustomTraversableResourcesTests(unittest.TestCase): + def setUp(self): + self.fixtures = contextlib.ExitStack() + self.addCleanup(self.fixtures.close) + + def test_custom_loader(self): + temp_dir = self.fixtures.enter_context(os_helper.temp_dir()) + loader = SimpleLoader(MagicResources(temp_dir)) + pkg = util.create_package_from_loader(loader) + files = resources.files(pkg) + assert files is temp_dir diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_files.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_files.py new file mode 100644 index 0000000..197a063 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_files.py @@ -0,0 +1,112 @@ +import typing +import textwrap +import unittest +import warnings +import importlib +import contextlib + +import importlib_resources as resources +from ..abc import Traversable +from . import data01 +from . import util +from . 
import _path +from ._compat import os_helper, import_helper + + +@contextlib.contextmanager +def suppress_known_deprecation(): + with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter('default', category=DeprecationWarning) + yield ctx + + +class FilesTests: + def test_read_bytes(self): + files = resources.files(self.data) + actual = files.joinpath('utf-8.file').read_bytes() + assert actual == b'Hello, UTF-8 world!\n' + + def test_read_text(self): + files = resources.files(self.data) + actual = files.joinpath('utf-8.file').read_text(encoding='utf-8') + assert actual == 'Hello, UTF-8 world!\n' + + @unittest.skipUnless( + hasattr(typing, 'runtime_checkable'), + "Only suitable when typing supports runtime_checkable", + ) + def test_traversable(self): + assert isinstance(resources.files(self.data), Traversable) + + def test_old_parameter(self): + """ + Files used to take a 'package' parameter. Make sure anyone + passing by name is still supported. + """ + with suppress_known_deprecation(): + resources.files(package=self.data) + + +class OpenDiskTests(FilesTests, unittest.TestCase): + def setUp(self): + self.data = data01 + + +class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase): + pass + + +class OpenNamespaceTests(FilesTests, unittest.TestCase): + def setUp(self): + from . import namespacedata01 + + self.data = namespacedata01 + + +class SiteDir: + def setUp(self): + self.fixtures = contextlib.ExitStack() + self.addCleanup(self.fixtures.close) + self.site_dir = self.fixtures.enter_context(os_helper.temp_dir()) + self.fixtures.enter_context(import_helper.DirsOnSysPath(self.site_dir)) + self.fixtures.enter_context(import_helper.CleanImport()) + + +class ModulesFilesTests(SiteDir, unittest.TestCase): + def test_module_resources(self): + """ + A module can have resources found adjacent to the module. + """ + spec = { + 'mod.py': '', + 'res.txt': 'resources are the best', + } + _path.build(spec, self.site_dir) + import mod + + actual = resources.files(mod).joinpath('res.txt').read_text(encoding='utf-8') + assert actual == spec['res.txt'] + + +class ImplicitContextFilesTests(SiteDir, unittest.TestCase): + def test_implicit_files(self): + """ + Without any parameter, files() will infer the location as the caller. + """ + spec = { + 'somepkg': { + '__init__.py': textwrap.dedent( + """ + import importlib_resources as res + val = res.files().joinpath('res.txt').read_text(encoding='utf-8') + """ + ), + 'res.txt': 'resources are the best', + }, + } + _path.build(spec, self.site_dir) + assert importlib.import_module('somepkg').val == 'resources are the best' + + +if __name__ == '__main__': + unittest.main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_open.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_open.py new file mode 100644 index 0000000..83b737d --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_open.py @@ -0,0 +1,85 @@ +import unittest + +import importlib_resources as resources +from . import data01 +from . 
import util + + +class CommonBinaryTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + target = resources.files(package).joinpath(path) + with target.open('rb'): + pass + + +class CommonTextTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + target = resources.files(package).joinpath(path) + with target.open(encoding='utf-8'): + pass + + +class OpenTests: + def test_open_binary(self): + target = resources.files(self.data) / 'binary.file' + with target.open('rb') as fp: + result = fp.read() + self.assertEqual(result, b'\x00\x01\x02\x03') + + def test_open_text_default_encoding(self): + target = resources.files(self.data) / 'utf-8.file' + with target.open(encoding='utf-8') as fp: + result = fp.read() + self.assertEqual(result, 'Hello, UTF-8 world!\n') + + def test_open_text_given_encoding(self): + target = resources.files(self.data) / 'utf-16.file' + with target.open(encoding='utf-16', errors='strict') as fp: + result = fp.read() + self.assertEqual(result, 'Hello, UTF-16 world!\n') + + def test_open_text_with_errors(self): + """ + Raises UnicodeError without the 'errors' argument. + """ + target = resources.files(self.data) / 'utf-16.file' + with target.open(encoding='utf-8', errors='strict') as fp: + self.assertRaises(UnicodeError, fp.read) + with target.open(encoding='utf-8', errors='ignore') as fp: + result = fp.read() + self.assertEqual( + result, + 'H\x00e\x00l\x00l\x00o\x00,\x00 ' + '\x00U\x00T\x00F\x00-\x001\x006\x00 ' + '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00', + ) + + def test_open_binary_FileNotFoundError(self): + target = resources.files(self.data) / 'does-not-exist' + with self.assertRaises(FileNotFoundError): + target.open('rb') + + def test_open_text_FileNotFoundError(self): + target = resources.files(self.data) / 'does-not-exist' + with self.assertRaises(FileNotFoundError): + target.open(encoding='utf-8') + + +class OpenDiskTests(OpenTests, unittest.TestCase): + def setUp(self): + self.data = data01 + + +class OpenDiskNamespaceTests(OpenTests, unittest.TestCase): + def setUp(self): + from . import namespacedata01 + + self.data = namespacedata01 + + +class OpenZipTests(OpenTests, util.ZipSetup, unittest.TestCase): + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_path.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_path.py new file mode 100644 index 0000000..7cb2000 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_path.py @@ -0,0 +1,69 @@ +import io +import unittest + +import importlib_resources as resources +from . import data01 +from . import util + + +class CommonTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + with resources.as_file(resources.files(package).joinpath(path)): + pass + + +class PathTests: + def test_reading(self): + """ + Path should be readable. + + Test also implicitly verifies the returned object is a pathlib.Path + instance. + """ + target = resources.files(self.data) / 'utf-8.file' + with resources.as_file(target) as path: + self.assertTrue(path.name.endswith("utf-8.file"), repr(path)) + # pathlib.Path.read_text() was introduced in Python 3.5. 
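+            # as_file() yields a real filesystem path, extracting zip-backed
+            # resources to a temporary file when needed, so plain file I/O works here.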
+ with path.open('r', encoding='utf-8') as file: + text = file.read() + self.assertEqual('Hello, UTF-8 world!\n', text) + + +class PathDiskTests(PathTests, unittest.TestCase): + data = data01 + + def test_natural_path(self): + """ + Guarantee the internal implementation detail that + file-system-backed resources do not get the tempdir + treatment. + """ + target = resources.files(self.data) / 'utf-8.file' + with resources.as_file(target) as path: + assert 'data' in str(path) + + +class PathMemoryTests(PathTests, unittest.TestCase): + def setUp(self): + file = io.BytesIO(b'Hello, UTF-8 world!\n') + self.addCleanup(file.close) + self.data = util.create_package( + file=file, path=FileNotFoundError("package exists only in memory") + ) + self.data.__spec__.origin = None + self.data.__spec__.has_location = False + + +class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase): + def test_remove_in_context_manager(self): + """ + It is not an error if the file that was temporarily stashed on the + file system is removed inside the `with` stanza. + """ + target = resources.files(self.data) / 'utf-8.file' + with resources.as_file(target) as path: + path.unlink() + + +if __name__ == '__main__': + unittest.main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_read.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_read.py new file mode 100644 index 0000000..b5aaec4 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_read.py @@ -0,0 +1,82 @@ +import unittest +import importlib_resources as resources + +from . import data01 +from . import util +from importlib import import_module + + +class CommonBinaryTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + resources.files(package).joinpath(path).read_bytes() + + +class CommonTextTests(util.CommonTests, unittest.TestCase): + def execute(self, package, path): + resources.files(package).joinpath(path).read_text(encoding='utf-8') + + +class ReadTests: + def test_read_bytes(self): + result = resources.files(self.data).joinpath('binary.file').read_bytes() + self.assertEqual(result, b'\0\1\2\3') + + def test_read_text_default_encoding(self): + result = ( + resources.files(self.data) + .joinpath('utf-8.file') + .read_text(encoding='utf-8') + ) + self.assertEqual(result, 'Hello, UTF-8 world!\n') + + def test_read_text_given_encoding(self): + result = ( + resources.files(self.data) + .joinpath('utf-16.file') + .read_text(encoding='utf-16') + ) + self.assertEqual(result, 'Hello, UTF-16 world!\n') + + def test_read_text_with_errors(self): + """ + Raises UnicodeError without the 'errors' argument. 
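+        With errors='ignore', the UTF-16 bytes decode (under UTF-8) to the
+        NUL-interleaved text asserted below.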
+ """ + target = resources.files(self.data) / 'utf-16.file' + self.assertRaises(UnicodeError, target.read_text, encoding='utf-8') + result = target.read_text(encoding='utf-8', errors='ignore') + self.assertEqual( + result, + 'H\x00e\x00l\x00l\x00o\x00,\x00 ' + '\x00U\x00T\x00F\x00-\x001\x006\x00 ' + '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00', + ) + + +class ReadDiskTests(ReadTests, unittest.TestCase): + data = data01 + + +class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase): + def test_read_submodule_resource(self): + submodule = import_module('ziptestdata.subdirectory') + result = resources.files(submodule).joinpath('binary.file').read_bytes() + self.assertEqual(result, b'\0\1\2\3') + + def test_read_submodule_resource_by_name(self): + result = ( + resources.files('ziptestdata.subdirectory') + .joinpath('binary.file') + .read_bytes() + ) + self.assertEqual(result, b'\0\1\2\3') + + +class ReadNamespaceTests(ReadTests, unittest.TestCase): + def setUp(self): + from . import namespacedata01 + + self.data = namespacedata01 + + +if __name__ == '__main__': + unittest.main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_reader.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_reader.py new file mode 100644 index 0000000..e2bdf19 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_reader.py @@ -0,0 +1,144 @@ +import os.path +import sys +import pathlib +import unittest + +from importlib import import_module +from importlib_resources.readers import MultiplexedPath, NamespaceReader + + +class MultiplexedPathTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + path = pathlib.Path(__file__).parent / 'namespacedata01' + cls.folder = str(path) + + def test_init_no_paths(self): + with self.assertRaises(FileNotFoundError): + MultiplexedPath() + + def test_init_file(self): + with self.assertRaises(NotADirectoryError): + MultiplexedPath(os.path.join(self.folder, 'binary.file')) + + def test_iterdir(self): + contents = {path.name for path in MultiplexedPath(self.folder).iterdir()} + try: + contents.remove('__pycache__') + except (KeyError, ValueError): + pass + self.assertEqual(contents, {'binary.file', 'utf-16.file', 'utf-8.file'}) + + def test_iterdir_duplicate(self): + data01 = os.path.abspath(os.path.join(__file__, '..', 'data01')) + contents = { + path.name for path in MultiplexedPath(self.folder, data01).iterdir() + } + for remove in ('__pycache__', '__init__.pyc'): + try: + contents.remove(remove) + except (KeyError, ValueError): + pass + self.assertEqual( + contents, + {'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'}, + ) + + def test_is_dir(self): + self.assertEqual(MultiplexedPath(self.folder).is_dir(), True) + + def test_is_file(self): + self.assertEqual(MultiplexedPath(self.folder).is_file(), False) + + def test_open_file(self): + path = MultiplexedPath(self.folder) + with self.assertRaises(FileNotFoundError): + path.read_bytes() + with self.assertRaises(FileNotFoundError): + path.read_text() + with self.assertRaises(FileNotFoundError): + path.open() + + def test_join_path(self): + prefix = os.path.abspath(os.path.join(__file__, '..')) + data01 = os.path.join(prefix, 'data01') + path = MultiplexedPath(self.folder, data01) + self.assertEqual( + str(path.joinpath('binary.file'))[len(prefix) + 1 :], + os.path.join('namespacedata01', 'binary.file'), + ) + self.assertEqual( + 
str(path.joinpath('subdirectory'))[len(prefix) + 1 :], + os.path.join('data01', 'subdirectory'), + ) + self.assertEqual( + str(path.joinpath('imaginary'))[len(prefix) + 1 :], + os.path.join('namespacedata01', 'imaginary'), + ) + self.assertEqual(path.joinpath(), path) + + def test_join_path_compound(self): + path = MultiplexedPath(self.folder) + assert not path.joinpath('imaginary/foo.py').exists() + + def test_join_path_common_subdir(self): + prefix = os.path.abspath(os.path.join(__file__, '..')) + data01 = os.path.join(prefix, 'data01') + data02 = os.path.join(prefix, 'data02') + path = MultiplexedPath(data01, data02) + self.assertIsInstance(path.joinpath('subdirectory'), MultiplexedPath) + self.assertEqual( + str(path.joinpath('subdirectory', 'subsubdir'))[len(prefix) + 1 :], + os.path.join('data02', 'subdirectory', 'subsubdir'), + ) + + def test_repr(self): + self.assertEqual( + repr(MultiplexedPath(self.folder)), + f"MultiplexedPath('{self.folder}')", + ) + + def test_name(self): + self.assertEqual( + MultiplexedPath(self.folder).name, + os.path.basename(self.folder), + ) + + +class NamespaceReaderTest(unittest.TestCase): + site_dir = str(pathlib.Path(__file__).parent) + + @classmethod + def setUpClass(cls): + sys.path.append(cls.site_dir) + + @classmethod + def tearDownClass(cls): + sys.path.remove(cls.site_dir) + + def test_init_error(self): + with self.assertRaises(ValueError): + NamespaceReader(['path1', 'path2']) + + def test_resource_path(self): + namespacedata01 = import_module('namespacedata01') + reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations) + + root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01')) + self.assertEqual( + reader.resource_path('binary.file'), os.path.join(root, 'binary.file') + ) + self.assertEqual( + reader.resource_path('imaginary'), os.path.join(root, 'imaginary') + ) + + def test_files(self): + namespacedata01 = import_module('namespacedata01') + reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations) + root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01')) + self.assertIsInstance(reader.files(), MultiplexedPath) + self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')") + + +if __name__ == '__main__': + unittest.main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_resource.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_resource.py new file mode 100644 index 0000000..677110c --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/test_resource.py @@ -0,0 +1,252 @@ +import contextlib +import sys +import unittest +import importlib_resources as resources +import uuid +import pathlib + +from . import data01 +from . import zipdata01, zipdata02 +from . import util +from importlib import import_module +from ._compat import import_helper, os_helper, unlink + + +class ResourceTests: + # Subclasses are expected to set the `data` attribute. 
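+    # (e.g. the on-disk data01 package, or the zip-backed module provided
+    # by util.ZipSetup in ResourceZipTests below.)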
+ + def test_is_file_exists(self): + target = resources.files(self.data) / 'binary.file' + self.assertTrue(target.is_file()) + + def test_is_file_missing(self): + target = resources.files(self.data) / 'not-a-file' + self.assertFalse(target.is_file()) + + def test_is_dir(self): + target = resources.files(self.data) / 'subdirectory' + self.assertFalse(target.is_file()) + self.assertTrue(target.is_dir()) + + +class ResourceDiskTests(ResourceTests, unittest.TestCase): + def setUp(self): + self.data = data01 + + +class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase): + pass + + +def names(traversable): + return {item.name for item in traversable.iterdir()} + + +class ResourceLoaderTests(unittest.TestCase): + def test_resource_contents(self): + package = util.create_package( + file=data01, path=data01.__file__, contents=['A', 'B', 'C'] + ) + self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'}) + + def test_is_file(self): + package = util.create_package( + file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] + ) + self.assertTrue(resources.files(package).joinpath('B').is_file()) + + def test_is_dir(self): + package = util.create_package( + file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] + ) + self.assertTrue(resources.files(package).joinpath('D').is_dir()) + + def test_resource_missing(self): + package = util.create_package( + file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F'] + ) + self.assertFalse(resources.files(package).joinpath('Z').is_file()) + + +class ResourceCornerCaseTests(unittest.TestCase): + def test_package_has_no_reader_fallback(self): + """ + Test odd-ball packages which: + 1. Do not have a ResourceReader as a loader + 2. Are not on the file system + 3. Are not in a zip file + """ + module = util.create_package( + file=data01, path=data01.__file__, contents=['A', 'B', 'C'] + ) + # Give the module a dummy loader. + module.__loader__ = object() + # Give the module a dummy origin. + module.__file__ = '/path/which/shall/not/be/named' + module.__spec__.loader = module.__loader__ + module.__spec__.origin = module.__file__ + self.assertFalse(resources.files(module).joinpath('A').is_file()) + + +class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase): + ZIP_MODULE = zipdata01 # type: ignore + + def test_is_submodule_resource(self): + submodule = import_module('ziptestdata.subdirectory') + self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file()) + + def test_read_submodule_resource_by_name(self): + self.assertTrue( + resources.files('ziptestdata.subdirectory') + .joinpath('binary.file') + .is_file() + ) + + def test_submodule_contents(self): + submodule = import_module('ziptestdata.subdirectory') + self.assertEqual( + names(resources.files(submodule)), {'__init__.py', 'binary.file'} + ) + + def test_submodule_contents_by_name(self): + self.assertEqual( + names(resources.files('ziptestdata.subdirectory')), + {'__init__.py', 'binary.file'}, + ) + + def test_as_file_directory(self): + with resources.as_file(resources.files('ziptestdata')) as data: + assert data.name == 'ziptestdata' + assert data.is_dir() + assert data.joinpath('subdirectory').is_dir() + assert len(list(data.iterdir())) + assert not data.parent.exists() + + +class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase): + ZIP_MODULE = zipdata02 # type: ignore + + def test_unrelated_contents(self): + """ + Test that a zip with two unrelated subpackages returns + distinct resources.
Ref python/importlib_resources#44. + """ + self.assertEqual( + names(resources.files('ziptestdata.one')), + {'__init__.py', 'resource1.txt'}, + ) + self.assertEqual( + names(resources.files('ziptestdata.two')), + {'__init__.py', 'resource2.txt'}, + ) + + +@contextlib.contextmanager +def zip_on_path(dir): + data_path = pathlib.Path(zipdata01.__file__) + source_zip_path = data_path.parent.joinpath('ziptestdata.zip') + zip_path = pathlib.Path(dir) / f'{uuid.uuid4()}.zip' + zip_path.write_bytes(source_zip_path.read_bytes()) + sys.path.append(str(zip_path)) + import_module('ziptestdata') + + try: + yield + finally: + with contextlib.suppress(ValueError): + sys.path.remove(str(zip_path)) + + with contextlib.suppress(KeyError): + del sys.path_importer_cache[str(zip_path)] + del sys.modules['ziptestdata'] + + with contextlib.suppress(OSError): + unlink(zip_path) + + +class DeletingZipsTest(unittest.TestCase): + """Having accessed resources in a zip file should not keep an open + reference to the zip. + """ + + def setUp(self): + self.fixtures = contextlib.ExitStack() + self.addCleanup(self.fixtures.close) + + modules = import_helper.modules_setup() + self.addCleanup(import_helper.modules_cleanup, *modules) + + temp_dir = self.fixtures.enter_context(os_helper.temp_dir()) + self.fixtures.enter_context(zip_on_path(temp_dir)) + + def test_iterdir_does_not_keep_open(self): + [item.name for item in resources.files('ziptestdata').iterdir()] + + def test_is_file_does_not_keep_open(self): + resources.files('ziptestdata').joinpath('binary.file').is_file() + + def test_is_file_failure_does_not_keep_open(self): + resources.files('ziptestdata').joinpath('not-present').is_file() + + @unittest.skip("Desired but not supported.") + def test_as_file_does_not_keep_open(self): # pragma: no cover + resources.as_file(resources.files('ziptestdata') / 'binary.file') + + def test_entered_path_does_not_keep_open(self): + """ + Mimic what certifi does on import to make its bundle + available for the process duration. 
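+        (The context manager is entered but deliberately never exited.)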
+ """ + resources.as_file(resources.files('ziptestdata') / 'binary.file').__enter__() + + def test_read_binary_does_not_keep_open(self): + resources.files('ziptestdata').joinpath('binary.file').read_bytes() + + def test_read_text_does_not_keep_open(self): + resources.files('ziptestdata').joinpath('utf-8.file').read_text( + encoding='utf-8' + ) + + +class ResourceFromNamespaceTest01(unittest.TestCase): + site_dir = str(pathlib.Path(__file__).parent) + + @classmethod + def setUpClass(cls): + sys.path.append(cls.site_dir) + + @classmethod + def tearDownClass(cls): + sys.path.remove(cls.site_dir) + + def test_is_submodule_resource(self): + self.assertTrue( + resources.files(import_module('namespacedata01')) + .joinpath('binary.file') + .is_file() + ) + + def test_read_submodule_resource_by_name(self): + self.assertTrue( + resources.files('namespacedata01').joinpath('binary.file').is_file() + ) + + def test_submodule_contents(self): + contents = names(resources.files(import_module('namespacedata01'))) + try: + contents.remove('__pycache__') + except KeyError: + pass + self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'}) + + def test_submodule_contents_by_name(self): + contents = names(resources.files('namespacedata01')) + try: + contents.remove('__pycache__') + except KeyError: + pass + self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'}) + + +if __name__ == '__main__': + unittest.main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/update-zips.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/update-zips.py new file mode 100644 index 0000000..231334a --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/update-zips.py @@ -0,0 +1,53 @@ +""" +Generate the zip test data files. + +Run to build the tests/zipdataNN/ziptestdata.zip files from +files in tests/dataNN. + +Replaces the file with the working copy, but does not commit anything +to the source repo. +""" + +import contextlib +import os +import pathlib +import zipfile + + +def main(): + """ + >>> from unittest import mock + >>> monkeypatch = getfixture('monkeypatch') + >>> monkeypatch.setattr(zipfile, 'ZipFile', mock.MagicMock()) + >>> print(); main() # print workaround for bpo-32509 + + ...data01... -> ziptestdata/... + ... + ...data02... -> ziptestdata/... + ...
+ """ + suffixes = '01', '02' + tuple(map(generate, suffixes)) + + +def generate(suffix): + root = pathlib.Path(__file__).parent.relative_to(os.getcwd()) + zfpath = root / f'zipdata{suffix}/ziptestdata.zip' + with zipfile.ZipFile(zfpath, 'w') as zf: + for src, rel in walk(root / f'data{suffix}'): + dst = 'ziptestdata' / pathlib.PurePosixPath(rel.as_posix()) + print(src, '->', dst) + zf.write(src, dst) + + +def walk(datapath): + for dirpath, dirnames, filenames in os.walk(datapath): + with contextlib.suppress(ValueError): + dirnames.remove('__pycache__') + for filename in filenames: + res = pathlib.Path(dirpath) / filename + rel = res.relative_to(datapath) + yield res, rel + + +__name__ == '__main__' and main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/util.py new file mode 100644 index 0000000..ce0e6fa --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/util.py @@ -0,0 +1,179 @@ +import abc +import importlib +import io +import sys +import types +import pathlib + +from . import data01 +from . import zipdata01 +from ..abc import ResourceReader +from ._compat import import_helper + + +from importlib.machinery import ModuleSpec + + +class Reader(ResourceReader): + def __init__(self, **kwargs): + vars(self).update(kwargs) + + def get_resource_reader(self, package): + return self + + def open_resource(self, path): + self._path = path + if isinstance(self.file, Exception): + raise self.file + return self.file + + def resource_path(self, path_): + self._path = path_ + if isinstance(self.path, Exception): + raise self.path + return self.path + + def is_resource(self, path_): + self._path = path_ + if isinstance(self.path, Exception): + raise self.path + + def part(entry): + return entry.split('/') + + return any( + len(parts) == 1 and parts[0] == path_ for parts in map(part, self._contents) + ) + + def contents(self): + if isinstance(self.path, Exception): + raise self.path + yield from self._contents + + +def create_package_from_loader(loader, is_package=True): + name = 'testingpackage' + module = types.ModuleType(name) + spec = ModuleSpec(name, loader, origin='does-not-exist', is_package=is_package) + module.__spec__ = spec + module.__loader__ = loader + return module + + +def create_package(file=None, path=None, is_package=True, contents=()): + return create_package_from_loader( + Reader(file=file, path=path, _contents=contents), + is_package, + ) + + +class CommonTests(metaclass=abc.ABCMeta): + """ + Tests shared by test_open, test_path, and test_read. + """ + + @abc.abstractmethod + def execute(self, package, path): + """ + Call the pertinent legacy API function (e.g. open_text, path) + on package and path. + """ + + def test_package_name(self): + """ + Passing in the package name should succeed. + """ + self.execute(data01.__name__, 'utf-8.file') + + def test_package_object(self): + """ + Passing in the package itself should succeed. + """ + self.execute(data01, 'utf-8.file') + + def test_string_path(self): + """ + Passing in a string for the path should succeed. + """ + path = 'utf-8.file' + self.execute(data01, path) + + def test_pathlib_path(self): + """ + Passing in a pathlib.PurePath object for the path should succeed. 
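+        (PurePath objects are accepted in addition to plain strings.)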
+ """ + path = pathlib.PurePath('utf-8.file') + self.execute(data01, path) + + def test_importing_module_as_side_effect(self): + """ + The anchor package can already be imported. + """ + del sys.modules[data01.__name__] + self.execute(data01.__name__, 'utf-8.file') + + def test_missing_path(self): + """ + Attempting to open or read or request the path for a + non-existent path should succeed if open_resource + can return a viable data stream. + """ + bytes_data = io.BytesIO(b'Hello, world!') + package = create_package(file=bytes_data, path=FileNotFoundError()) + self.execute(package, 'utf-8.file') + self.assertEqual(package.__loader__._path, 'utf-8.file') + + def test_extant_path(self): + # Attempting to open or read or request the path when the + # path does exist should still succeed. Does not assert + # anything about the result. + bytes_data = io.BytesIO(b'Hello, world!') + # any path that exists + path = __file__ + package = create_package(file=bytes_data, path=path) + self.execute(package, 'utf-8.file') + self.assertEqual(package.__loader__._path, 'utf-8.file') + + def test_useless_loader(self): + package = create_package(file=FileNotFoundError(), path=FileNotFoundError()) + with self.assertRaises(FileNotFoundError): + self.execute(package, 'utf-8.file') + + +class ZipSetupBase: + ZIP_MODULE = None + + @classmethod + def setUpClass(cls): + data_path = pathlib.Path(cls.ZIP_MODULE.__file__) + data_dir = data_path.parent + cls._zip_path = str(data_dir / 'ziptestdata.zip') + sys.path.append(cls._zip_path) + cls.data = importlib.import_module('ziptestdata') + + @classmethod + def tearDownClass(cls): + try: + sys.path.remove(cls._zip_path) + except ValueError: + pass + + try: + del sys.path_importer_cache[cls._zip_path] + del sys.modules[cls.data.__name__] + except KeyError: + pass + + try: + del cls.data + del cls._zip_path + except AttributeError: + pass + + def setUp(self): + modules = import_helper.modules_setup() + self.addCleanup(import_helper.modules_cleanup, *modules) + + +class ZipSetup(ZipSetupBase): + ZIP_MODULE = zipdata01 # type: ignore diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata01/__init__.py similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/REQUESTED rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata01/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata01/ziptestdata.zip b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata01/ziptestdata.zip new file mode 100644 index 0000000000000000000000000000000000000000..9a3bb0739f87e97c1084b94d7d153680f6727738 GIT binary patch literal 876 zcmWIWW@Zs#00HOCX@Q%&m27l?Y!DU);;PJolGNgol*E!m{nC;&T|+ayw9K5;|NlG~ zQWMD z9;rDw`8o=rA#S=B3g!7lIVp-}COK17UPc zNtt;*xhM-3R!jMEPhCreO-3*u>5Df}T7+BJ{639e$2uhfsIs`pJ5Qf}C xGXyDE@VNvOv@o!wQJfLgCAgysx3f@9jKpUmiW^zkK<;1z!tFpk^MROw0RS~O%0&PG literal 0 HcmV?d00001 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata02/__init__.py similarity index 100% rename from 
Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/REQUESTED rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata02/__init__.py diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata02/ziptestdata.zip b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_resources/tests/zipdata02/ziptestdata.zip new file mode 100644 index 0000000000000000000000000000000000000000..d63ff512d2807ef2fd259455283b81b02e0e45fb GIT binary patch literal 698 zcmWIWW@Zs#00HOCX@Ot{ln@8fRhb1Psl_EJi6x2p@$s2?nI-Y@dIgmMI5kP5Y0A$_ z#jWw|&p#`9ff_(q7K_HB)Z+ZoqU2OVy^@L&ph*fa0WRVlP*R?c+X1opI-R&20MZDv z&j{oIpa8N17@0(vaR(gGH(;=&5k%n(M%;#g0ulz6G@1gL$cA79E2=^00gEsw4~s!C zUxI@ZWaIMqz|BszK;s4KsL2<9jRy!Q2E6`2cTLHjr{wAk1ZCU@!+_ G1_l6Bc%f?m literal 0 HcmV?d00001 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/_compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/_compat.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/_identifier.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/_identifier.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/asyncfilters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/asyncfilters.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/asyncsupport.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/asyncsupport.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/bccache.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/bccache.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/compiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/compiler.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/constants.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/constants.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/debug.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/debug.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/defaults.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/defaults.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/environment.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/environment.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/exceptions.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/ext.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/ext.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/filters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/filters.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/idtracking.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/idtracking.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/lexer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/lexer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/loaders.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/loaders.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/meta.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/meta.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/nativetypes.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/nativetypes.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/nodes.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/nodes.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/optimizer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/optimizer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/parser.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/runtime.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/runtime.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/sandbox.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/sandbox.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/tests.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/tests.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/utils.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/visitor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jinja2/visitor.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/RECORD index a77293d..3487a2e 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/RECORD +++ 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/RECORD @@ -3,7 +3,7 @@ jsl-0.2.4.dist-info/LICENSE,sha256=SOcEeWBevO0ulaPJ1ZbwQHJrPKCqwjXQ6tUAo_LrYmE,1 jsl-0.2.4.dist-info/METADATA,sha256=Hoxdaj4BiIHCDAJfABf-ZKi-mpICjqGFzCXuG6Ve-ag,3655 jsl-0.2.4.dist-info/RECORD,, jsl-0.2.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -jsl-0.2.4.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +jsl-0.2.4.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 jsl-0.2.4.dist-info/top_level.txt,sha256=5pe24rVnpqKlN1x1SAB6vpgck0EQXzb1HD-STjLrCXE,4 jsl/__init__.py,sha256=Hav-h60_x4Z_1-SPwtgXo6HFmv2TAay_3xWfootT8Hg,536 jsl/_compat/__init__.py,sha256=Wfz6h02kZsiVvzNdZALq2RTa7XpEbeAjeiE8CFTzC8Y,2118 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/WHEEL index becc9a6..1f37c02 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl-0.2.4.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) +Generator: bdist_wheel (0.40.0) Root-Is-Purelib: true Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/ordereddict.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/ordereddict.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/prepareable.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/_compat/prepareable.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/document.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/document.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/base.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/base.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/compound.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/compound.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/primitive.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/primitive.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/fields/util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/registry.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/registry.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/resolutionscope.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/resolutionscope.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/roles.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsl/roles.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/comments.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/comments.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/wrapper.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsoncomment/wrapper.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/bin/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/bin/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/bin/jsonpath.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/bin/jsonpath.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/arithmetic.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/arithmetic.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/filter.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/filter.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/iterable.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/iterable.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/parser.py old mode 100755 new mode 
100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/string.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/ext/string.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/jsonpath.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/jsonpath.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/lexer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/lexer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_ng/parser.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/RECORD index 033b725..732ea70 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/RECORD @@ -3,7 +3,7 @@ jsonpath_rw-1.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7z jsonpath_rw-1.4.0.dist-info/METADATA,sha256=YpILgtomSGnBnMkWAnoiCjCltr_Xg-NRUASouddqa5I,13258 jsonpath_rw-1.4.0.dist-info/RECORD,, jsonpath_rw-1.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -jsonpath_rw-1.4.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +jsonpath_rw-1.4.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 jsonpath_rw-1.4.0.dist-info/entry_points.txt,sha256=rhhrY2M1qcBqXZdcsE7b3WzZYR9K7jqG8JZ_BQBj3OA,70 jsonpath_rw-1.4.0.dist-info/top_level.txt,sha256=ZkYenrz7C0bQMlN3Nn0VYMWQc2Vm5zfcGQTOJowecEA,12 jsonpath_rw/__init__.py,sha256=ptcHrCa_lKTgP_OLUyEeiFSfolhQG5wsxzAckU7OZSY,73 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL index becc9a6..1f37c02 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw-1.4.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) +Generator: bdist_wheel (0.40.0) Root-Is-Purelib: true Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/bin/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/bin/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/bin/jsonpath.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/bin/jsonpath.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/jsonpath.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/jsonpath.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/lexer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/lexer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonpath_rw/parser.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/RECORD deleted file mode 100644 index 7cd9555..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/RECORD +++ /dev/null @@ -1,37 +0,0 @@ -../../bin/jsonschema,sha256=lxBKiFj4zfSEeher0xvOrFwGZp6m2vSW1P8sy-V0baA,213 -jsonschema-3.2.0.dist-info/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057 -jsonschema-3.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -jsonschema-3.2.0.dist-info/METADATA,sha256=os_TL7tiSfPYDMKYoAqoNsw_yMkDJmCL2bqhp-csNR0,7760 -jsonschema-3.2.0.dist-info/RECORD,, -jsonschema-3.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -jsonschema-3.2.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110 -jsonschema-3.2.0.dist-info/entry_points.txt,sha256=KaVUBBSLyzi5naUkVg-r3q6T_igdLgaHY6Mm3oLX73s,52 -jsonschema-3.2.0.dist-info/top_level.txt,sha256=jGoNS61vDONU8U7p0Taf-y_8JVG1Z2CJ5Eif6zMN_cw,11 -jsonschema/__init__.py,sha256=dHAr_pQLbbDFoRnbVMrQVztVUvnBFgFlm7bU82pMvOk,934 -jsonschema/__main__.py,sha256=in4bbzfixCAyGe3RhBwhQVZnGkruszNedcbmwxGyJgc,39 -jsonschema/_format.py,sha256=vwD1v7S8BmJvSF5y0o6dbPgjAyzt07PZpyO3pvNVVgQ,11691 -jsonschema/_legacy_validators.py,sha256=kYcYiHfRV-aQtIQv2qe_71L3QFs3LiJ3v69ifteAN4E,4584 -jsonschema/_reflect.py,sha256=gggQrcrf5FRoyhgdE6ggJ4n2FQHEzWS4CS-cm9bYcqI,5023 -jsonschema/_types.py,sha256=t2naRRhuTKIUIB0GMR9kOp2la2aVqeT2tFlVAobndmg,4490 -jsonschema/_utils.py,sha256=ezZJMQ0eU4oPvkTmZi6g5vsCtIFRhb8vN4Y9S4uQwW8,5168 -jsonschema/_validators.py,sha256=UDYawpxK8f_rIeEBXZtwr0tlxi3OH1Zt2ca0zAxjNdk,11703 -jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70 -jsonschema/benchmarks/issue232.py,sha256=-azAUmrP75f0uj0x2zEdBc3-DhQw3XX9UQVDCyhBKRk,541 -jsonschema/benchmarks/json_schema_test_suite.py,sha256=okRE6ACue2C0Hd1dMhnpZ0bc3AoZdDd8cw2lwTnbzwU,343 -jsonschema/cli.py,sha256=3Vc8ptc2GD7zDxK2F-kamqmrE9f35a2KVDGR1p1acUU,2310 -jsonschema/compat.py,sha256=37gSA8MmAR65zlqzsSEB-0ObZk_I2TF7z1kp9zmkskg,1353 -jsonschema/exceptions.py,sha256=ukWIE7aEES8Kh0UaUP9turpUkV2ZzXEN8CwfRObzlMA,10450 -jsonschema/schemas/draft3.json,sha256=PdtCu2s06Va3hV9cX5A5-rvye50SVF__NrvxG0vuzz0,4564 -jsonschema/schemas/draft4.json,sha256=ODL-0W3kv7MmqL3tm3GJguuVxN1QNO1GtBcpWE3ok98,5399 -jsonschema/schemas/draft6.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437 -jsonschema/schemas/draft7.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819 -jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -jsonschema/tests/_helpers.py,sha256=3c-b9CK0cdGfhtuUhzM1AjtqPtR2VFvfcKC6G2g0a-0,157 -jsonschema/tests/_suite.py,sha256=6lxDHOyjJfCjdn9vfOLcUpXtNl0vLIljrinSFi1tRhc,6728 
-jsonschema/tests/test_cli.py,sha256=djw7ZD6zm5_8FgsAr9XyYk4zErIEoPRs8SzBe5nYcWY,4727
-jsonschema/tests/test_exceptions.py,sha256=zw9bd_al5zOzAm8nJ0IqeymiweH6i8k1AN3CB7t618A,15348
-jsonschema/tests/test_format.py,sha256=ob0QDop_nwRwiLs1P6sGsf6ZITik00CWhe1pL8JRiA0,2982
-jsonschema/tests/test_jsonschema_test_suite.py,sha256=8uiplgvQq5yFvtvWxbyqyr7HMYRCx6jNE3OiU-u8AEk,8464
-jsonschema/tests/test_types.py,sha256=lntWPZ86fwo_aNKbfCueX5R2xdwrYYN7Zo5C0-ppk-0,5902
-jsonschema/tests/test_validators.py,sha256=R_zhsDKG5r66LE1OVlzdcPyKRWKgc07e6NVWxQkrRiQ,60394
-jsonschema/validators.py,sha256=RIZTQyZxhWwsyIIRFQGEjLzq38LlyzzzdYUl9jxzV0M,29400
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/WHEEL
deleted file mode 100644
index 8b701e9..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.33.6)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/COPYING b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/COPYING
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/COPYING
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/COPYING
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/INSTALLER
similarity index 100%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/importlib_metadata-4.10.1.dist-info/INSTALLER
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/INSTALLER
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/METADATA
similarity index 61%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/METADATA
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/METADATA
index aef9b18..5606353 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/METADATA
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/METADATA
@@ -1,51 +1,59 @@
 Metadata-Version: 2.1
 Name: jsonschema
-Version: 3.2.0
+Version: 4.4.0
 Summary: An implementation of JSON Schema validation for Python
 Home-page: https://github.com/Julian/jsonschema
 Author: Julian Berman
 Author-email: Julian@GrayVines.com
-License: UNKNOWN
-Project-URL: Docs, https://python-jsonschema.readthedocs.io/en/latest/
+License: MIT
+Project-URL: Documentation, https://python-jsonschema.readthedocs.io/en/latest/
+Project-URL: Source, https://github.com/Julian/jsonschema
+Project-URL: Issues, https://github.com/Julian/jsonschema/issues/
 Platform: UNKNOWN
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
 Classifier: Programming Language :: Python :: 3.7
 Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: COPYING
 Requires-Dist: attrs (>=17.4.0)
-Requires-Dist: pyrsistent (>=0.14.0)
-Requires-Dist: setuptools
-Requires-Dist: six (>=1.11.0)
-Requires-Dist: functools32 ; python_version < "3"
+Requires-Dist: pyrsistent (!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0)
 Requires-Dist: importlib-metadata ; python_version < "3.8"
+Requires-Dist: typing-extensions ; python_version < "3.8"
+Requires-Dist: importlib-resources (>=1.4.0) ; python_version < "3.9"
 Provides-Extra: format
+Requires-Dist: fqdn ; extra == 'format'
 Requires-Dist: idna ; extra == 'format'
+Requires-Dist: isoduration ; extra == 'format'
 Requires-Dist: jsonpointer (>1.13) ; extra == 'format'
+Requires-Dist: rfc3339-validator ; extra == 'format'
 Requires-Dist: rfc3987 ; extra == 'format'
-Requires-Dist: strict-rfc3339 ; extra == 'format'
-Requires-Dist: webcolors ; extra == 'format'
+Requires-Dist: uri-template ; extra == 'format'
+Requires-Dist: webcolors (>=1.11) ; extra == 'format'
 Provides-Extra: format_nongpl
+Requires-Dist: fqdn ; extra == 'format_nongpl'
 Requires-Dist: idna ; extra == 'format_nongpl'
+Requires-Dist: isoduration ; extra == 'format_nongpl'
 Requires-Dist: jsonpointer (>1.13) ; extra == 'format_nongpl'
-Requires-Dist: webcolors ; extra == 'format_nongpl'
-Requires-Dist: rfc3986-validator (>0.1.0) ; extra == 'format_nongpl'
 Requires-Dist: rfc3339-validator ; extra == 'format_nongpl'
+Requires-Dist: rfc3986-validator (>0.1.0) ; extra == 'format_nongpl'
+Requires-Dist: uri-template ; extra == 'format_nongpl'
+Requires-Dist: webcolors (>=1.11) ; extra == 'format_nongpl'
 
 ==========
 jsonschema
 ==========
 
-|PyPI| |Pythons| |Travis| |AppVeyor| |Codecov| |ReadTheDocs|
+|PyPI| |Pythons| |CI| |ReadTheDocs| |Precommit| |Zenodo|
 
 .. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema.svg
    :alt: PyPI version
@@ -55,25 +63,24 @@ jsonschema
    :alt: Supported Python versions
    :target: https://pypi.org/project/jsonschema/
 
-.. |Travis| image:: https://travis-ci.com/Julian/jsonschema.svg?branch=master
-   :alt: Travis build status
-   :target: https://travis-ci.com/Julian/jsonschema
-
-.. |AppVeyor| image:: https://ci.appveyor.com/api/projects/status/adtt0aiaihy6muyn/branch/master?svg=true
-   :alt: AppVeyor build status
-   :target: https://ci.appveyor.com/project/Julian/jsonschema
-
-.. |Codecov| image:: https://codecov.io/gh/Julian/jsonschema/branch/master/graph/badge.svg
-   :alt: Codecov Code coverage
-   :target: https://codecov.io/gh/Julian/jsonschema
+.. |CI| image:: https://github.com/Julian/jsonschema/workflows/CI/badge.svg
+   :alt: Build status
+   :target: https://github.com/Julian/jsonschema/actions?query=workflow%3ACI
 
 .. |ReadTheDocs| image:: https://readthedocs.org/projects/python-jsonschema/badge/?version=stable&style=flat
    :alt: ReadTheDocs status
   :target: https://python-jsonschema.readthedocs.io/en/stable/
 
+.. |Precommit| image:: https://results.pre-commit.ci/badge/github/Julian/jsonschema/main.svg
+   :alt: pre-commit.ci status
+   :target: https://results.pre-commit.ci/latest/github/Julian/jsonschema/main
+
+.. |Zenodo| image:: https://zenodo.org/badge/3072629.svg
+   :target: https://zenodo.org/badge/latestdoi/3072629
+
-``jsonschema`` is an implementation of `JSON Schema `_
-for Python (supporting 2.7+ including Python 3).
+``jsonschema`` is an implementation of the `JSON Schema
+`_ specification for Python.
 
 .. code-block:: python
 
@@ -102,19 +109,23 @@ It can also be used from console:
 
 .. code-block:: bash
 
-    $ jsonschema -i sample.json sample.schema
+    $ jsonschema --instance sample.json sample.schema
 
 Features
 --------
 
-* Full support for
+* Partial support for
+  `Draft 2020-12 `_ and
+  `Draft 2019-09 `_,
+  except for ``dynamicRef`` / ``recursiveRef`` and ``$vocabulary`` (in-progress).
+  Full support for
   `Draft 7 `_,
  `Draft 6 `_,
   `Draft 4 `_ and
   `Draft 3 `_
 
-* `Lazy validation `_
+* `Lazy validation `_
   that can iteratively report *all* validation errors.
 
 * `Programmatic querying `_
@@ -131,33 +142,6 @@ Installation
 
     $ pip install jsonschema
 
-Demo
-----
-
-Try ``jsonschema`` interactively in this online demo:
-
-.. image:: https://user-images.githubusercontent.com/1155573/56745335-8b158a00-6750-11e9-8776-83fa675939c4.png
-    :target: https://notebooks.ai/demo/gh/Julian/jsonschema
-    :alt: Open Live Demo
-
-
-Online demo Notebook will look similar to this:
-
-
-.. image:: https://user-images.githubusercontent.com/1155573/56820861-5c1c1880-6823-11e9-802a-ce01c5ec574f.gif
-    :alt: Open Live Demo
-    :width: 480 px
-
-
-Release Notes
--------------
-
-v3.1 brings support for ECMA 262 dialect regular expressions
-throughout schemas, as recommended by the specification. Big
-thanks to @Zac-HD for authoring support in a new `js-regex
-`_ library.
-
-
 Running the Test Suite
 ----------------------
 
@@ -176,29 +160,21 @@ Benchmarks
 ----------
 
 ``jsonschema``'s benchmarks make use of `pyperf
-`_.
+`_. Running them can be done via::
 
-Running them can be done via ``tox -e perf``, or by invoking the ``pyperf``
-commands externally (after ensuring that both it and ``jsonschema`` itself are
-installed)::
-
-    $ python -m pyperf jsonschema/benchmarks/test_suite.py --hist --output results.json
-
-To compare to a previous run, use::
-
-    $ python -m pyperf compare_to --table reference.json results.json
-
-See the ``pyperf`` documentation for more details.
+    $ tox -e perf
 
 Community
 ---------
 
-There's a `mailing list `_
-for this implementation on Google Groups.
-
-Please join, and feel free to send questions there.
+The JSON Schema specification has `a Slack
+`_, with an `invite link on its home page
+`_. Many folks knowledgeable on authoring
+schemas can be found there.
+Otherwise, asking questions on Stack Overflow is another means of
+getting help if you're stuck.
 
 Contributing
 ------------
 
@@ -210,11 +186,11 @@ I'm Julian Berman. Get in touch, via GitHub or otherwise, if you've
 got something to contribute, it'd be most welcome!
 
-You can also generally find me on Freenode (nick: ``tos9``) in various
+You can also generally find me on Libera (nick: ``Julian``) in various
 channels, including ``#python``.
 
-If you feel overwhelmingly grateful, you can also woo me with beer money
-via Google Pay with the email in my GitHub profile.
+If you feel overwhelmingly grateful, you can also `sponsor me +`_. And for companies who appreciate ``jsonschema`` and its continued support and growth, ``jsonschema`` is also now supportable via `TideLift diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/RECORD new file mode 100644 index 0000000..bf4e290 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/RECORD @@ -0,0 +1,43 @@ +../../bin/jsonschema,sha256=lxBKiFj4zfSEeher0xvOrFwGZp6m2vSW1P8sy-V0baA,213 +jsonschema-4.4.0.dist-info/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057 +jsonschema-4.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jsonschema-4.4.0.dist-info/METADATA,sha256=mf-oc4RNWKinypj-AgQJwf17Ejw4Jiu9mH1JDPc3LUM,7539 +jsonschema-4.4.0.dist-info/RECORD,, +jsonschema-4.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema-4.4.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +jsonschema-4.4.0.dist-info/entry_points.txt,sha256=KaVUBBSLyzi5naUkVg-r3q6T_igdLgaHY6Mm3oLX73s,52 +jsonschema-4.4.0.dist-info/top_level.txt,sha256=jGoNS61vDONU8U7p0Taf-y_8JVG1Z2CJ5Eif6zMN_cw,11 +jsonschema/__init__.py,sha256=h0l2RPVM9kimU7-jTSKoEnguV3QGvrrQvlnJN3F6UPk,1561 +jsonschema/__main__.py,sha256=Sfz1ZNeogymj_KZxq6JXY3F6O_1v28sLIiskusifQ5s,40 +jsonschema/_format.py,sha256=MqdmiZPvQcseyH28byggqxnTUGB52oP9X1jiT5yVwDw,13156 +jsonschema/_legacy_validators.py,sha256=-LlXuPD8n1vUI4PUxhLp5xMLPXQNl7PiL3KYQmulyco,7199 +jsonschema/_reflect.py,sha256=qrE9u6y_d7MRIXWReN3Kiwkyytm3lQh6Pfdj9qvrbaY,4859 +jsonschema/_types.py,sha256=_NDm3OxdPPWAqBSpfo4QVEA_oqfKMACg1QslVx0S900,5364 +jsonschema/_utils.py,sha256=JsFatTW-dPS7V4H5Xdn9aw15HlNlSxvaO3iTsFqWs_Y,10415 +jsonschema/_validators.py,sha256=bRgXtl4UpD5lmy5qZOtwe92IJC-_2BbUx8oZzKDw4zE,15434 +jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70 +jsonschema/benchmarks/issue232.py,sha256=r_V1CaY1rLHP0UCxoEeQhZe5kwQBkdQYdPKmxCj7DbE,495 +jsonschema/benchmarks/json_schema_test_suite.py,sha256=PvfabpUYcF4_7csYDTcTauED8rnFEGYbdY5RqTXD08s,320 +jsonschema/cli.py,sha256=ldAuYYfY9OvQLnAT5PEQrGQn7-fBy_rSY_V9ZesjP8g,8136 +jsonschema/exceptions.py,sha256=utvZjE7HBABp7w5XXWie0EksGpmKD-Hb2yfdOQ93eMM,10268 +jsonschema/protocols.py,sha256=le6gCn2Zr-j8RuGuI1mF78s483PWUaGcOG3GhnChv20,6012 +jsonschema/schemas/draft2019-09.json,sha256=e3YbPhIfCgyh6ioLjizIVrz4AWBLgmjXG6yqICvAwTs,1785 +jsonschema/schemas/draft2020-12.json,sha256=Qdp29a-3zgYtJI92JGOpL3ykfk4PkFsiS6av7vkd7Q8,2452 +jsonschema/schemas/draft3.json,sha256=2LanCgvBrUT8Eyk37KszzCjFxuOw0UBFOeS-ahb5Crg,2699 +jsonschema/schemas/draft4.json,sha256=d-VZ-zmogXIypnObMGPT_e88TPZ9Zb40jd2-Fuvs9j4,4355 +jsonschema/schemas/draft6.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437 +jsonschema/schemas/draft7.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819 +jsonschema/schemas/vocabularies.json,sha256=SW7oOta6bhkEdVDPBKgvrosztMW_UyKs-s04pgpgXqs,12845 +jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema/tests/_helpers.py,sha256=3c-b9CK0cdGfhtuUhzM1AjtqPtR2VFvfcKC6G2g0a-0,157 +jsonschema/tests/_suite.py,sha256=1uc_lOHcwxyfyL7DujRQMzPg3xvoQoVkg15ks3RwCjk,6482 +jsonschema/tests/fuzz_validate.py,sha256=GeNlFQepS7ax7Sh90iISVYQXjUkPCUF0c20jEPgPx8s,1085 
+jsonschema/tests/test_cli.py,sha256=y52uBGTEgab6IhnTLSaA94xUTLLp3OKSQiy3qtiRMCQ,28674 +jsonschema/tests/test_deprecations.py,sha256=paMq3Hd33zDfVsJpTd95MAOzI6y7IoUQ5brgp9qqVdU,3901 +jsonschema/tests/test_exceptions.py,sha256=WOFFmvp9l9OgCR-bPx_VkLifuNNn7xnPeqpqk7Tjxf8,15700 +jsonschema/tests/test_format.py,sha256=Gu4are4xUyRQc8YL0z-RlDOIc9_96ISv83hZRf8R2t0,3763 +jsonschema/tests/test_jsonschema_test_suite.py,sha256=5Ej98xJe61PBw8uwanoY_D5zMY5wivy3dOjBn38t9uc,13605 +jsonschema/tests/test_types.py,sha256=DyvSKPtuaIu93Lkde80PkJkNOKgvCbaDYAfHz0yxyL0,6803 +jsonschema/tests/test_utils.py,sha256=lJRVYyQeZQTUCTU_M3BhlkxPMgjsc8KQCd7U_Qkook8,3749 +jsonschema/tests/test_validators.py,sha256=qkNF5FSMqB9nDSrmsuckOx-MOGDXTUTDzdtODwjnznE,73618 +jsonschema/validators.py,sha256=iJrjNm6J-6yyQBSu85HCfDyftrn-4loltqErfIcFtRk,34163 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/REQUESTED similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/REQUESTED rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/REQUESTED diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/WHEEL similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/charset_normalizer-2.0.11.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/WHEEL diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/entry_points.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/entry_points.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/entry_points.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/entry_points.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-4.4.0.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__init__.py old mode 100755 new mode 100644 index 6b630cd..75f2946 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__init__.py @@ -7,28 +7,52 @@ Most commonly, `validate` is the quickest way to simply validate a given instance under a schema, and will create a validator for you. 
""" +import warnings -from jsonschema.exceptions import ( - ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError -) from jsonschema._format import ( FormatChecker, draft3_format_checker, draft4_format_checker, draft6_format_checker, draft7_format_checker, + draft201909_format_checker, + draft202012_format_checker, ) from jsonschema._types import TypeChecker +from jsonschema.exceptions import ( + ErrorTree, + FormatError, + RefResolutionError, + SchemaError, + ValidationError, +) +from jsonschema.protocols import Validator from jsonschema.validators import ( Draft3Validator, Draft4Validator, Draft6Validator, Draft7Validator, + Draft201909Validator, + Draft202012Validator, RefResolver, validate, ) -try: - from importlib import metadata -except ImportError: # for Python<3.8 - import importlib_metadata as metadata -__version__ = metadata.version("jsonschema") + + +def __getattr__(name): + if name == "__version__": + warnings.warn( + "Accessing jsonschema.__version__ is deprecated and will be " + "removed in a future release. Use importlib.metadata directly " + "to query for jsonschema's version.", + DeprecationWarning, + stacklevel=2, + ) + + try: + from importlib import metadata + except ImportError: + import importlib_metadata as metadata + + return metadata.version("jsonschema") + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__main__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__main__.py old mode 100755 new mode 100644 index 82c29fd..fdc21e2 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__main__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/__main__.py @@ -1,2 +1,3 @@ from jsonschema.cli import main + main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_format.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_format.py old mode 100755 new mode 100644 index 281a7cf..5f99c65 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_format.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_format.py @@ -1,9 +1,12 @@ +from __future__ import annotations + +from contextlib import suppress +from uuid import UUID import datetime +import ipaddress import re -import socket -import struct +import typing -from jsonschema.compat import str_types from jsonschema.exceptions import FormatError @@ -24,13 +27,19 @@ class FormatChecker(object): Arguments: - formats (~collections.Iterable): + formats (~collections.abc.Iterable): The known formats to validate. This argument can be used to limit which formats will be used during validation. 
""" - checkers = {} + checkers: dict[ + str, + tuple[ + typing.Callable[[typing.Any], bool], + Exception | tuple[Exception, ...], + ], + ] = {} def __init__(self, formats=None): if formats is None: @@ -98,9 +107,7 @@ def check(self, instance, format): except raises as e: cause = e if not result: - raise FormatError( - "%r is not a %r" % (instance, format), cause=cause, - ) + raise FormatError(f"{instance!r} is not a {format!r}", cause=cause) def conforms(self, instance, format): """ @@ -133,13 +140,16 @@ def conforms(self, instance, format): draft4_format_checker = FormatChecker() draft6_format_checker = FormatChecker() draft7_format_checker = FormatChecker() - +draft201909_format_checker = FormatChecker() +draft202012_format_checker = FormatChecker() _draft_checkers = dict( draft3=draft3_format_checker, draft4=draft4_format_checker, draft6=draft6_format_checker, draft7=draft7_format_checker, + draft201909=draft201909_format_checker, + draft202012=draft202012_format_checker, ) @@ -149,12 +159,16 @@ def _checks_drafts( draft4=None, draft6=None, draft7=None, + draft201909=None, + draft202012=None, raises=(), ): draft3 = draft3 or name draft4 = draft4 or name draft6 = draft6 or name draft7 = draft7 or name + draft201909 = draft201909 or name + draft202012 = draft202012 or name def wrap(func): if draft3: @@ -165,13 +179,22 @@ def wrap(func): func = _draft_checkers["draft6"].checks(draft6, raises)(func) if draft7: func = _draft_checkers["draft7"].checks(draft7, raises)(func) + if draft201909: + func = _draft_checkers["draft201909"].checks(draft201909, raises)( + func, + ) + if draft202012: + func = _draft_checkers["draft202012"].checks(draft202012, raises)( + func, + ) # Oy. This is bad global state, but relied upon for now, until # deprecation. See https://github.com/Julian/jsonschema/issues/519 # and test_format_checkers_come_with_defaults - FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)( - func, - ) + FormatChecker.cls_checks( + draft202012 or draft201909 or draft7 or draft6 or draft4 or draft3, + raises, + )(func) return func return wrap @@ -179,67 +202,63 @@ def wrap(func): @_checks_drafts(name="idn-email") @_checks_drafts(name="email") def is_email(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return "@" in instance -_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$") - - @_checks_drafts( - draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4", + draft3="ip-address", + draft4="ipv4", + draft6="ipv4", + draft7="ipv4", + draft201909="ipv4", + draft202012="ipv4", + raises=ipaddress.AddressValueError, ) def is_ipv4(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True - if not _ipv4_re.match(instance): - return False - return all(0 <= int(component) <= 255 for component in instance.split(".")) + return ipaddress.IPv4Address(instance) -if hasattr(socket, "inet_pton"): - # FIXME: Really this only should raise struct.error, but see the sadness - # that is https://twistedmatrix.com/trac/ticket/9409 - @_checks_drafts( - name="ipv6", raises=(socket.error, struct.error, ValueError), - ) - def is_ipv6(instance): - if not isinstance(instance, str_types): - return True - return socket.inet_pton(socket.AF_INET6, instance) - +@_checks_drafts(name="ipv6", raises=ipaddress.AddressValueError) +def is_ipv6(instance): + if not isinstance(instance, str): + return True + address = ipaddress.IPv6Address(instance) + return not getattr(address, "scope_id", "") 
-_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$") +with suppress(ImportError): + from fqdn import FQDN -@_checks_drafts( - draft3="host-name", - draft4="hostname", - draft6="hostname", - draft7="hostname", -) -def is_host_name(instance): - if not isinstance(instance, str_types): - return True - if not _host_name_re.match(instance): - return False - components = instance.split(".") - for component in components: - if len(component) > 63: - return False - return True + @_checks_drafts( + draft3="host-name", + draft4="hostname", + draft6="hostname", + draft7="hostname", + draft201909="hostname", + draft202012="hostname", + ) + def is_host_name(instance): + if not isinstance(instance, str): + return True + return FQDN(instance).is_valid -try: +with suppress(ImportError): # The built-in `idna` codec only implements RFC 3890, so we go elsewhere. import idna -except ImportError: - pass -else: - @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError) + + @_checks_drafts( + draft7="idn-hostname", + draft201909="idn-hostname", + draft202012="idn-hostname", + raises=(idna.IDNAError, UnicodeError), + ) def is_idn_host_name(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True idna.encode(instance) return True @@ -248,135 +267,144 @@ def is_idn_host_name(instance): try: import rfc3987 except ImportError: - try: + with suppress(ImportError): from rfc3986_validator import validate_rfc3986 - except ImportError: - pass - else: + @_checks_drafts(name="uri") def is_uri(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return validate_rfc3986(instance, rule="URI") @_checks_drafts( draft6="uri-reference", draft7="uri-reference", + draft201909="uri-reference", + draft202012="uri-reference", raises=ValueError, ) def is_uri_reference(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return validate_rfc3986(instance, rule="URI_reference") else: - @_checks_drafts(draft7="iri", raises=ValueError) + @_checks_drafts( + draft7="iri", + draft201909="iri", + draft202012="iri", + raises=ValueError, + ) def is_iri(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return rfc3987.parse(instance, rule="IRI") - @_checks_drafts(draft7="iri-reference", raises=ValueError) + @_checks_drafts( + draft7="iri-reference", + draft201909="iri-reference", + draft202012="iri-reference", + raises=ValueError, + ) def is_iri_reference(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return rfc3987.parse(instance, rule="IRI_reference") @_checks_drafts(name="uri", raises=ValueError) def is_uri(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return rfc3987.parse(instance, rule="URI") @_checks_drafts( draft6="uri-reference", draft7="uri-reference", + draft201909="uri-reference", + draft202012="uri-reference", raises=ValueError, ) def is_uri_reference(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return rfc3987.parse(instance, rule="URI_reference") +with suppress(ImportError): + from rfc3339_validator import validate_rfc3339 -try: - from strict_rfc3339 import validate_rfc3339 -except ImportError: - try: - from rfc3339_validator import validate_rfc3339 - except ImportError: - validate_rfc3339 = None - -if validate_rfc3339: @_checks_drafts(name="date-time") def 
is_datetime(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True - return validate_rfc3339(instance) + return validate_rfc3339(instance.upper()) - @_checks_drafts(draft7="time") + @_checks_drafts( + draft7="time", + draft201909="time", + draft202012="time", + ) def is_time(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return is_datetime("1970-01-01T" + instance) @_checks_drafts(name="regex", raises=re.error) def is_regex(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return re.compile(instance) -@_checks_drafts(draft3="date", draft7="date", raises=ValueError) +@_checks_drafts( + draft3="date", + draft7="date", + draft201909="date", + draft202012="date", + raises=ValueError, +) def is_date(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True - return datetime.datetime.strptime(instance, "%Y-%m-%d") + return instance.isascii() and datetime.date.fromisoformat(instance) @_checks_drafts(draft3="time", raises=ValueError) def is_draft3_time(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return datetime.datetime.strptime(instance, "%H:%M:%S") -try: +with suppress(ImportError): + from webcolors import CSS21_NAMES_TO_HEX import webcolors -except ImportError: - pass -else: + def is_css_color_code(instance): return webcolors.normalize_hex(instance) @_checks_drafts(draft3="color", raises=(ValueError, TypeError)) def is_css21_color(instance): if ( - not isinstance(instance, str_types) or - instance.lower() in webcolors.css21_names_to_hex + not isinstance(instance, str) + or instance.lower() in CSS21_NAMES_TO_HEX ): return True return is_css_color_code(instance) - def is_css3_color(instance): - if instance.lower() in webcolors.css3_names_to_hex: - return True - return is_css_color_code(instance) - -try: +with suppress(ImportError): import jsonpointer -except ImportError: - pass -else: + @_checks_drafts( draft6="json-pointer", draft7="json-pointer", + draft201909="json-pointer", + draft202012="json-pointer", raises=jsonpointer.JsonPointerException, ) def is_json_pointer(instance): - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True return jsonpointer.JsonPointer(instance) @@ -386,16 +414,22 @@ def is_json_pointer(instance): # into a new external library. 
@_checks_drafts( draft7="relative-json-pointer", + draft201909="relative-json-pointer", + draft202012="relative-json-pointer", raises=jsonpointer.JsonPointerException, ) def is_relative_json_pointer(instance): # Definition taken from: # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 - if not isinstance(instance, str_types): + if not isinstance(instance, str): return True non_negative_integer, rest = [], "" for i, character in enumerate(instance): if character.isdigit(): + # digits with a leading "0" are not allowed + if i > 0 and int(instance[i - 1]) == 0: + return False + non_negative_integer.append(character) continue @@ -407,19 +441,42 @@ def is_relative_json_pointer(instance): return (rest == "#") or jsonpointer.JsonPointer(rest) -try: - import uritemplate.exceptions -except ImportError: - pass -else: +with suppress(ImportError): + import uri_template + @_checks_drafts( draft6="uri-template", draft7="uri-template", - raises=uritemplate.exceptions.InvalidTemplate, + draft201909="uri-template", + draft202012="uri-template", ) - def is_uri_template( - instance, - template_validator=uritemplate.Validator().force_balanced_braces(), - ): - template = uritemplate.URITemplate(instance) - return template_validator.validate(template) + def is_uri_template(instance): + if not isinstance(instance, str): + return True + return uri_template.validate(instance) + + +with suppress(ImportError): + import isoduration + + @_checks_drafts( + draft201909="duration", + draft202012="duration", + raises=isoduration.DurationParsingException, + ) + def is_duration(instance): + if not isinstance(instance, str): + return True + return isoduration.parse_duration(instance) + + +@_checks_drafts( + draft201909="uuid", + draft202012="uuid", + raises=ValueError, +) +def is_uuid(instance): + if not isinstance(instance, str): + return True + UUID(instance) + return all(instance[position] == "-" for position in (8, 13, 18, 23)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_legacy_validators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_legacy_validators.py old mode 100755 new mode 100644 index 264ff7d..c8eff2c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_legacy_validators.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_legacy_validators.py @@ -1,49 +1,88 @@ from jsonschema import _utils -from jsonschema.compat import iteritems from jsonschema.exceptions import ValidationError +def ignore_ref_siblings(schema): + """ + Ignore siblings of ``$ref`` if it is present. + + Otherwise, return all validators. + + Suitable for use with `create`'s ``applicable_validators`` argument. 
+ """ + ref = schema.get("$ref") + if ref is not None: + return [("$ref", ref)] + else: + return schema.items() + + def dependencies_draft3(validator, dependencies, instance, schema): if not validator.is_type(instance, "object"): return - for property, dependency in iteritems(dependencies): + for property, dependency in dependencies.items(): if property not in instance: continue if validator.is_type(dependency, "object"): - for error in validator.descend( + yield from validator.descend( instance, dependency, schema_path=property, - ): - yield error + ) elif validator.is_type(dependency, "string"): if dependency not in instance: - yield ValidationError( - "%r is a dependency of %r" % (dependency, property) - ) + message = f"{dependency!r} is a dependency of {property!r}" + yield ValidationError(message) else: for each in dependency: if each not in instance: - message = "%r is a dependency of %r" - yield ValidationError(message % (each, property)) + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + + +def dependencies_draft4_draft6_draft7( + validator, + dependencies, + instance, + schema, +): + """ + Support for the ``dependencies`` validator from pre-draft 2019-09. + + In later drafts, the validator was split into separate + ``dependentRequired`` and ``dependentSchemas`` validators. + """ + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependencies.items(): + if property not in instance: + continue + + if validator.is_type(dependency, "array"): + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + else: + yield from validator.descend( + instance, dependency, schema_path=property, + ) def disallow_draft3(validator, disallow, instance, schema): for disallowed in _utils.ensure_list(disallow): - if validator.is_valid(instance, {"type": [disallowed]}): - yield ValidationError( - "%r is disallowed for %r" % (disallowed, instance) - ) + if validator.evolve(schema={"type": [disallowed]}).is_valid(instance): + message = f"{disallowed!r} is disallowed for {instance!r}" + yield ValidationError(message) def extends_draft3(validator, extends, instance, schema): if validator.is_type(extends, "object"): - for error in validator.descend(instance, extends): - yield error + yield from validator.descend(instance, extends) return for index, subschema in enumerate(extends): - for error in validator.descend(instance, subschema, schema_path=index): - yield error + yield from validator.descend(instance, subschema, schema_path=index) def items_draft3_draft4(validator, items, instance, schema): @@ -52,14 +91,26 @@ def items_draft3_draft4(validator, items, instance, schema): if validator.is_type(items, "object"): for index, item in enumerate(instance): - for error in validator.descend(item, items, path=index): - yield error + yield from validator.descend(item, items, path=index) else: for (index, item), subschema in zip(enumerate(instance), items): - for error in validator.descend( + yield from validator.descend( + item, subschema, path=index, schema_path=index, + ) + + +def items_draft6_draft7_draft201909(validator, items, instance, schema): + if not validator.is_type(instance, "array"): + return + + if validator.is_type(items, "array"): + for (index, item), subschema in zip(enumerate(instance), items): + yield from validator.descend( item, subschema, path=index, schema_path=index, - ): - yield error + ) + else: + for index, item in 
enumerate(instance): + yield from validator.descend(item, items, path=index) def minimum_draft3_draft4(validator, minimum, instance, schema): @@ -74,9 +125,8 @@ def minimum_draft3_draft4(validator, minimum, instance, schema): cmp = "less than" if failed: - yield ValidationError( - "%r is %s the minimum of %r" % (instance, cmp, minimum) - ) + message = f"{instance!r} is {cmp} the minimum of {minimum!r}" + yield ValidationError(message) def maximum_draft3_draft4(validator, maximum, instance, schema): @@ -91,26 +141,24 @@ def maximum_draft3_draft4(validator, maximum, instance, schema): cmp = "greater than" if failed: - yield ValidationError( - "%r is %s the maximum of %r" % (instance, cmp, maximum) - ) + message = f"{instance!r} is {cmp} the maximum of {maximum!r}" + yield ValidationError(message) def properties_draft3(validator, properties, instance, schema): if not validator.is_type(instance, "object"): return - for property, subschema in iteritems(properties): + for property, subschema in properties.items(): if property in instance: - for error in validator.descend( + yield from validator.descend( instance[property], subschema, path=property, schema_path=property, - ): - yield error + ) elif subschema.get("required", False): - error = ValidationError("%r is a required property" % property) + error = ValidationError(f"{property!r} is a required property") error._set( validator="required", validator_value=subschema["required"], @@ -136,6 +184,41 @@ def type_draft3(validator, types, instance, schema): if validator.is_type(instance, type): return else: + reprs = [] + for type in types: + try: + reprs.append(repr(type["name"])) + except Exception: + reprs.append(repr(type)) yield ValidationError( - _utils.types_msg(instance, types), context=all_errors, + f"{instance!r} is not of type {', '.join(reprs)}", + context=all_errors, ) + + +def contains_draft6_draft7(validator, contains, instance, schema): + if not validator.is_type(instance, "array"): + return + + if not any( + validator.evolve(schema=contains).is_valid(element) + for element in instance + ): + yield ValidationError( + f"None of {instance!r} are valid under the given schema", + ) + + +def recursiveRef(validator, recursiveRef, instance, schema): + lookup_url, target = validator.resolver.resolution_scope, validator.schema + + for each in reversed(validator.resolver._scopes_stack[1:]): + lookup_url, next_target = validator.resolver.resolve(each) + if next_target.get("$recursiveAnchor"): + target = next_target + else: + break + + fragment = recursiveRef.lstrip("#") + subschema = validator.resolver.resolve_fragment(target, fragment) + yield from validator.descend(instance, subschema) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_reflect.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_reflect.py old mode 100755 new mode 100644 index d09e38f..39ee7a6 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_reflect.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_reflect.py @@ -9,8 +9,6 @@ import sys -from jsonschema.compat import PY3 - class _NoModuleFound(Exception): """ @@ -42,12 +40,8 @@ class ObjectNotFound(InvalidName): -if PY3: - def reraise(exception, traceback): - raise exception.with_traceback(traceback) -else: - exec("""def reraise(exception, traceback): - raise exception.__class__, exception, traceback""") +def reraise(exception, traceback): + raise exception.with_traceback(traceback) 
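# --- Editor's aside: an illustrative note, not part of the vendored diff ---
# A recurring pattern in the hunks above: Python 2 compatibility shims are
# dropped in favour of Python 3 idioms. ``reraise`` keeps only the PY3
# branch, generator delegation replaces manual re-yield loops, and
# %-formatting becomes f-strings; a sketch of the before/after:
#
#     for error in validator.descend(instance, subschema):   # old
#         yield error
#
#     yield from validator.descend(instance, subschema)      # new
#
#     "%r is a dependency of %r" % (each, property)          # old
#     f"{each!r} is a dependency of {property!r}"            # new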
reraise.__doc__ = """ Re-raise an exception, with an optional traceback, in a way that is compatible diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_types.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_types.py old mode 100755 new mode 100644 index a71a4e3..9d59eb3 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_types.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_types.py @@ -1,12 +1,33 @@ +from __future__ import annotations + import numbers +import typing from pyrsistent import pmap import attr -from jsonschema.compat import int_types, str_types from jsonschema.exceptions import UndefinedTypeCheck +# unfortunately, the type of pmap is generic, and if used as the attr.ib +# converter, the generic type is presented to mypy, which then fails to match +# the concrete type of a type checker mapping +# this "do nothing" wrapper presents the correct information to mypy +def _typed_pmap_converter( + init_val: typing.Mapping[ + str, + typing.Callable[["TypeChecker", typing.Any], bool], + ], +) -> typing.Mapping[str, typing.Callable[["TypeChecker", typing.Any], bool]]: + return typing.cast( + typing.Mapping[ + str, + typing.Callable[["TypeChecker", typing.Any], bool], + ], + pmap(init_val), + ) + + def is_array(checker, instance): return isinstance(instance, list) @@ -19,7 +40,7 @@ def is_integer(checker, instance): # bool inherits from int, so ensure bools aren't reported as ints if isinstance(instance, bool): return False - return isinstance(instance, int_types) + return isinstance(instance, int) def is_null(checker, instance): @@ -38,7 +59,7 @@ def is_object(checker, instance): def is_string(checker, instance): - return isinstance(instance, str_types) + return isinstance(instance, str) def is_any(checker, instance): @@ -50,7 +71,7 @@ class TypeChecker(object): """ A ``type`` property checker. - A `TypeChecker` performs type checking for an `IValidator`. Type + A `TypeChecker` performs type checking for a `Validator`. Type checks to perform are updated using `TypeChecker.redefine` or `TypeChecker.redefine_many` and removed via `TypeChecker.remove`. Each of these return a new `TypeChecker` object. @@ -61,7 +82,13 @@ class TypeChecker(object): The initial mapping of types to their checking functions. """ - _type_checkers = attr.ib(default=pmap(), converter=pmap) + + _type_checkers: typing.Mapping[ + str, typing.Callable[["TypeChecker", typing.Any], bool], + ] = attr.ib( + default=pmap(), + converter=_typed_pmap_converter, + ) def is_type(self, instance, type): """ @@ -90,7 +117,7 @@ def is_type(self, instance, type): try: fn = self._type_checkers[type] except KeyError: - raise UndefinedTypeCheck(type) + raise UndefinedTypeCheck(type) from None return fn(self, instance) @@ -104,7 +131,7 @@ def redefine(self, type, fn): The name of the type to check. - fn (collections.Callable): + fn (collections.abc.Callable): A function taking exactly two parameters - the type checker calling the function and the instance to check. @@ -141,7 +168,7 @@ def remove(self, *types): Arguments: - types (~collections.Iterable): + types (~collections.abc.Iterable): the names of the types to remove. 
@@ -167,22 +194,24 @@ def remove(self, *types):
 
 
 draft3_type_checker = TypeChecker(
     {
-        u"any": is_any,
-        u"array": is_array,
-        u"boolean": is_bool,
-        u"integer": is_integer,
-        u"object": is_object,
-        u"null": is_null,
-        u"number": is_number,
-        u"string": is_string,
+        "any": is_any,
+        "array": is_array,
+        "boolean": is_bool,
+        "integer": is_integer,
+        "object": is_object,
+        "null": is_null,
+        "number": is_number,
+        "string": is_string,
     },
 )
-draft4_type_checker = draft3_type_checker.remove(u"any")
+draft4_type_checker = draft3_type_checker.remove("any")
 draft6_type_checker = draft4_type_checker.redefine(
-    u"integer",
+    "integer",
     lambda checker, instance: (
-        is_integer(checker, instance) or
-        isinstance(instance, float) and instance.is_integer()
+        is_integer(checker, instance)
+        or isinstance(instance, float) and instance.is_integer()
     ),
 )
 draft7_type_checker = draft6_type_checker
+draft201909_type_checker = draft7_type_checker
+draft202012_type_checker = draft201909_type_checker
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_utils.py
old mode 100755
new mode 100644
index ceb8801..c66b07d
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_utils.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_utils.py
@@ -1,9 +1,15 @@
+from collections.abc import Mapping, MutableMapping, Sequence
+from urllib.parse import urlsplit
 import itertools
 import json
-import pkgutil
 import re
+import sys
 
-from jsonschema.compat import MutableMapping, str_types, urlsplit
+# The files() API was added in Python 3.9.
+if sys.version_info >= (3, 9):  # pragma: no cover
+    from importlib import resources
+else:  # pragma: no cover
+    import importlib_resources as resources  # type: ignore
 
 
 class URIDict(MutableMapping):
@@ -51,34 +57,31 @@ def load_schema(name):
     Load a schema from ./schemas/``name``.json and return it.
     """
 
-    data = pkgutil.get_data("jsonschema", "schemas/{0}.json".format(name))
-    return json.loads(data.decode("utf-8"))
+    path = resources.files(__package__).joinpath(f"schemas/{name}.json")
+    data = path.read_text(encoding="utf-8")
+    return json.loads(data)
 
 
-def indent(string, times=1):
-    """
-    A dumb version of `textwrap.indent` from Python 3.3.
-    """
-
-    return "\n".join(" " * (4 * times) + line for line in string.splitlines())
-
-
-def format_as_index(indices):
+def format_as_index(container, indices):
     """
     Construct a single string containing indexing operations for the indices.
 
-    For example, [1, 2, "foo"] -> [1][2]["foo"]
+    For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"]
 
     Arguments:
 
+        container (str):
+
+            A word to use for the thing being indexed
+
         indices (sequence):
 
             The indices to format.
     """
+
     if not indices:
-        return ""
-    return "[%s]" % "][".join(repr(index) for index in indices)
+        return container
+    return f"{container}[{']['.join(repr(index) for index in indices)}]"
 
 
 def find_additional_properties(instance, schema):
@@ -112,63 +115,52 @@ def extras_msg(extras):
     return ", ".join(repr(extra) for extra in extras), verb
 
 
-def types_msg(instance, types):
+def ensure_list(thing):
     """
-    Create an error message for a failure to match the given types.
-
-    If the ``instance`` is an object and contains a ``name`` property, it will
-    be considered to be a description of that object and used as its type.
-
-    Otherwise the message is simply the reprs of the given ``types``.
+    Wrap ``thing`` in a list if it's a single str.
+
+    Otherwise, return it unchanged.
     """
-    reprs = []
-    for type in types:
-        try:
-            reprs.append(repr(type["name"]))
-        except Exception:
-            reprs.append(repr(type))
-    return "%r is not of type %s" % (instance, ", ".join(reprs))
+    if isinstance(thing, str):
+        return [thing]
+    return thing
 
 
-def flatten(suitable_for_isinstance):
+def _mapping_equal(one, two):
     """
-    isinstance() can accept a bunch of really annoying different types:
-        * a single type
-        * a tuple of types
-        * an arbitrary nested tree of tuples
-
-    Return a flattened tuple of the given argument.
+    Check if two mappings are equal using the semantics of `equal`.
     """
+    if len(one) != len(two):
+        return False
+    return all(
+        key in two and equal(value, two[key])
+        for key, value in one.items()
+    )
 
-    types = set()
-
-    if not isinstance(suitable_for_isinstance, tuple):
-        suitable_for_isinstance = (suitable_for_isinstance,)
-    for thing in suitable_for_isinstance:
-        if isinstance(thing, tuple):
-            types.update(flatten(thing))
-        else:
-            types.add(thing)
-    return tuple(types)
-
-
-def ensure_list(thing):
+def _sequence_equal(one, two):
     """
-    Wrap ``thing`` in a list if it's a single str.
-
-    Otherwise, return it unchanged.
+    Check if two sequences are equal using the semantics of `equal`.
     """
-
-    if isinstance(thing, str_types):
-        return [thing]
-    return thing
+    if len(one) != len(two):
+        return False
+    return all(equal(i, j) for i, j in zip(one, two))
 
 
 def equal(one, two):
     """
-    Check if two things are equal, but evade booleans and ints being equal.
+    Check if two things are equal evading some Python type hierarchy semantics.
+
+    Specifically in JSON Schema, evade `bool` inheriting from `int`,
+    recursing into sequences to do the same.
     """
+    if isinstance(one, str) or isinstance(two, str):
+        return one == two
+    if isinstance(one, Sequence) and isinstance(two, Sequence):
+        return _sequence_equal(one, two)
+    if isinstance(one, Mapping) and isinstance(two, Mapping):
+        return _mapping_equal(one, two)
     return unbool(one) == unbool(two)
 
 
@@ -188,25 +180,169 @@ def uniq(container):
     """
     Check if all of a container's elements are unique.
 
-    Successively tries first to rely that the elements are hashable, then
-    falls back on them being sortable, and finally falls back on brute
-    force.
+    Tries to rely on the container being recursively sortable, or otherwise
+    falls back on (slow) brute force.
""" - try: - return len(set(unbool(i) for i in container)) == len(container) - except TypeError: - try: - sort = sorted(unbool(i) for i in container) - sliced = itertools.islice(sort, 1, None) - for i, j in zip(sort, sliced): - if i == j: - return False - except (NotImplementedError, TypeError): - seen = [] - for e in container: - e = unbool(e) - if e in seen: + sort = sorted(unbool(i) for i in container) + sliced = itertools.islice(sort, 1, None) + + for i, j in zip(sort, sliced): + if equal(i, j): + return False + + except (NotImplementedError, TypeError): + seen = [] + for e in container: + e = unbool(e) + + for i in seen: + if equal(i, e): return False - seen.append(e) + + seen.append(e) return True + + +def find_evaluated_item_indexes_by_schema(validator, instance, schema): + """ + Get all indexes of items that get evaluated under the current schema + + Covers all keywords related to unevaluatedItems: items, prefixItems, if, + then, else, contains, unevaluatedItems, allOf, oneOf, anyOf + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_indexes = [] + + if "items" in schema: + return list(range(0, len(instance))) + + if "$ref" in schema: + scope, resolved = validator.resolver.resolve(schema["$ref"]) + validator.resolver.push_scope(scope) + + try: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, resolved) + finally: + validator.resolver.pop_scope() + + if "prefixItems" in schema: + evaluated_indexes += list(range(0, len(schema["prefixItems"]))) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["then"], + ) + else: + if "else" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["else"], + ) + + for keyword in ["contains", "unevaluatedItems"]: + if keyword in schema: + for k, v in enumerate(instance): + if validator.evolve(schema=schema[keyword]).is_valid(v): + evaluated_indexes.append(k) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = list(validator.descend(instance, subschema)) + if not errs: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, subschema, + ) + + return evaluated_indexes + + +def find_evaluated_property_keys_by_schema(validator, instance, schema): + """ + Get all keys of items that get evaluated under the current schema + + Covers all keywords related to unevaluatedProperties: properties, + additionalProperties, unevaluatedProperties, patternProperties, + dependentSchemas, allOf, oneOf, anyOf, if, then, else + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_keys = [] + + if "$ref" in schema: + scope, resolved = validator.resolver.resolve(schema["$ref"]) + validator.resolver.push_scope(scope) + + try: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, resolved, + ) + finally: + validator.resolver.pop_scope() + + for keyword in [ + "properties", "additionalProperties", "unevaluatedProperties", + ]: + if keyword in schema: + if validator.is_type(schema[keyword], "boolean"): + for property, value in instance.items(): + if validator.evolve(schema=schema[keyword]).is_valid( + {property: value}, + ): + evaluated_keys.append(property) + + if 
validator.is_type(schema[keyword], "object"): + for property, subschema in schema[keyword].items(): + if property in instance and validator.evolve( + schema=subschema, + ).is_valid(instance[property]): + evaluated_keys.append(property) + + if "patternProperties" in schema: + for property, value in instance.items(): + for pattern, _ in schema["patternProperties"].items(): + if re.search(pattern, property) and validator.evolve( + schema=schema["patternProperties"], + ).is_valid({property: value}): + evaluated_keys.append(property) + + if "dependentSchemas" in schema: + for property, subschema in schema["dependentSchemas"].items(): + if property not in instance: + continue + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = list(validator.descend(instance, subschema)) + if not errs: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["then"], + ) + else: + if "else" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["else"], + ) + + return evaluated_keys diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_validators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_validators.py old mode 100755 new mode 100644 index 179fec0..9a07f5e --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_validators.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/_validators.py @@ -1,3 +1,5 @@ +from fractions import Fraction +from urllib.parse import urldefrag, urljoin import re from jsonschema._utils import ( @@ -5,25 +7,24 @@ equal, extras_msg, find_additional_properties, - types_msg, + find_evaluated_item_indexes_by_schema, + find_evaluated_property_keys_by_schema, unbool, uniq, ) from jsonschema.exceptions import FormatError, ValidationError -from jsonschema.compat import iteritems def patternProperties(validator, patternProperties, instance, schema): if not validator.is_type(instance, "object"): return - for pattern, subschema in iteritems(patternProperties): - for k, v in iteritems(instance): + for pattern, subschema in patternProperties.items(): + for k, v in instance.items(): if re.search(pattern, k): - for error in validator.descend( + yield from validator.descend( v, subschema, path=k, schema_path=pattern, - ): - yield error + ) def propertyNames(validator, propertyNames, instance, schema): @@ -31,11 +32,7 @@ def propertyNames(validator, propertyNames, instance, schema): return for property in instance: - for error in validator.descend( - instance=property, - schema=propertyNames, - ): - yield error + yield from validator.descend(instance=property, schema=propertyNames) def additionalProperties(validator, aP, instance, schema): @@ -46,20 +43,19 @@ def additionalProperties(validator, aP, instance, schema): if validator.is_type(aP, "object"): for extra in extras: - for error in validator.descend(instance[extra], aP, path=extra): - yield error + yield from validator.descend(instance[extra], aP, path=extra) elif not aP and 
extras: if "patternProperties" in schema: - patterns = sorted(schema["patternProperties"]) if len(extras) == 1: verb = "does" else: verb = "do" - error = "%s %s not match any of the regexes: %s" % ( - ", ".join(map(repr, sorted(extras))), - verb, - ", ".join(map(repr, patterns)), + + joined = ", ".join(repr(each) for each in sorted(extras)) + patterns = ", ".join( + repr(each) for each in sorted(schema["patternProperties"]) ) + error = f"{joined} {verb} not match any of the regexes: {patterns}" yield ValidationError(error) else: error = "Additional properties are not allowed (%s %s unexpected)" @@ -70,51 +66,76 @@ def items(validator, items, instance, schema): if not validator.is_type(instance, "array"): return - if validator.is_type(items, "array"): - for (index, item), subschema in zip(enumerate(instance), items): - for error in validator.descend( - item, subschema, path=index, schema_path=index, - ): - yield error + prefix = len(schema.get("prefixItems", [])) + total = len(instance) + if items is False and total > prefix: + message = f"Expected at most {prefix} items, but found {total}" + yield ValidationError(message) else: - for index, item in enumerate(instance): - for error in validator.descend(item, items, path=index): - yield error + for index in range(prefix, total): + yield from validator.descend( + instance=instance[index], + schema=items, + path=index, + ) def additionalItems(validator, aI, instance, schema): if ( - not validator.is_type(instance, "array") or - validator.is_type(schema.get("items", {}), "object") + not validator.is_type(instance, "array") + or validator.is_type(schema.get("items", {}), "object") ): return len_items = len(schema.get("items", [])) if validator.is_type(aI, "object"): for index, item in enumerate(instance[len_items:], start=len_items): - for error in validator.descend(item, aI, path=index): - yield error + yield from validator.descend(item, aI, path=index) elif not aI and len(instance) > len(schema.get("items", [])): error = "Additional items are not allowed (%s %s unexpected)" yield ValidationError( - error % - extras_msg(instance[len(schema.get("items", [])):]) + error % extras_msg(instance[len(schema.get("items", [])):]), ) def const(validator, const, instance, schema): if not equal(instance, const): - yield ValidationError("%r was expected" % (const,)) + yield ValidationError(f"{const!r} was expected") def contains(validator, contains, instance, schema): if not validator.is_type(instance, "array"): return - if not any(validator.is_valid(element, contains) for element in instance): - yield ValidationError( - "None of %r are valid under the given schema" % (instance,) - ) + matches = 0 + min_contains = schema.get("minContains", 1) + max_contains = schema.get("maxContains", len(instance)) + + for each in instance: + if validator.evolve(schema=contains).is_valid(each): + matches += 1 + if matches > max_contains: + yield ValidationError( + "Too many items match the given schema " + f"(expected at most {max_contains})", + validator="maxContains", + validator_value=max_contains, + ) + return + + if matches < min_contains: + if not matches: + yield ValidationError( + f"{instance!r} does not contain items " + "matching the given schema", + ) + else: + yield ValidationError( + "Too few items match the given schema (expected at least " + f"{min_contains} but only {matches} matched)", + validator="minContains", + validator_value=min_contains, + ) def exclusiveMinimum(validator, minimum, instance, schema): @@ -123,9 +144,8 @@ def 
exclusiveMinimum(validator, minimum, instance, schema): if instance <= minimum: yield ValidationError( - "%r is less than or equal to the minimum of %r" % ( - instance, minimum, - ), + f"{instance!r} is less than or equal to " + f"the minimum of {minimum!r}", ) @@ -135,9 +155,8 @@ def exclusiveMaximum(validator, maximum, instance, schema): if instance >= maximum: yield ValidationError( - "%r is greater than or equal to the maximum of %r" % ( - instance, maximum, - ), + f"{instance!r} is greater than or equal " + f"to the maximum of {maximum!r}", ) @@ -146,9 +165,8 @@ def minimum(validator, minimum, instance, schema): return if instance < minimum: - yield ValidationError( - "%r is less than the minimum of %r" % (instance, minimum) - ) + message = f"{instance!r} is less than the minimum of {minimum!r}" + yield ValidationError(message) def maximum(validator, maximum, instance, schema): @@ -156,9 +174,8 @@ def maximum(validator, maximum, instance, schema): return if instance > maximum: - yield ValidationError( - "%r is greater than the maximum of %r" % (instance, maximum) - ) + message = f"{instance!r} is greater than the maximum of {maximum!r}" + yield ValidationError(message) def multipleOf(validator, dB, instance, schema): @@ -167,39 +184,52 @@ def multipleOf(validator, dB, instance, schema): if isinstance(dB, float): quotient = instance / dB - failed = int(quotient) != quotient + try: + failed = int(quotient) != quotient + except OverflowError: + # When `instance` is large and `dB` is less than one, + # quotient can overflow to infinity; and then casting to int + # raises an error. + # + # In this case we fall back to Fraction logic, which is + # exact and cannot overflow. The performance is also + # acceptable: we try the fast all-float option first, and + # we know that fraction(dB) can have at most a few hundred + # digits in each part. The worst-case slowdown is therefore + # for already-slow enormous integers or Decimals. 
+ failed = (Fraction(instance) / Fraction(dB)).denominator != 1 else: failed = instance % dB if failed: - yield ValidationError("%r is not a multiple of %r" % (instance, dB)) + yield ValidationError(f"{instance!r} is not a multiple of {dB}") def minItems(validator, mI, instance, schema): if validator.is_type(instance, "array") and len(instance) < mI: - yield ValidationError("%r is too short" % (instance,)) + yield ValidationError(f"{instance!r} is too short") def maxItems(validator, mI, instance, schema): if validator.is_type(instance, "array") and len(instance) > mI: - yield ValidationError("%r is too long" % (instance,)) + yield ValidationError(f"{instance!r} is too long") def uniqueItems(validator, uI, instance, schema): if ( - uI and - validator.is_type(instance, "array") and - not uniq(instance) + uI + and validator.is_type(instance, "array") + and not uniq(instance) ): - yield ValidationError("%r has non-unique elements" % (instance,)) + yield ValidationError(f"{instance!r} has non-unique elements") def pattern(validator, patrn, instance, schema): if ( - validator.is_type(instance, "string") and - not re.search(patrn, instance) + validator.is_type(instance, "string") + and not re.search(patrn, instance) ): - yield ValidationError("%r does not match %r" % (instance, patrn)) + yield ValidationError(f"{instance!r} does not match {patrn!r}") def format(validator, format, instance, schema): @@ -212,80 +242,99 @@ def format(validator, format, instance, schema): def minLength(validator, mL, instance, schema): if validator.is_type(instance, "string") and len(instance) < mL: - yield ValidationError("%r is too short" % (instance,)) + yield ValidationError(f"{instance!r} is too short") def maxLength(validator, mL, instance, schema): if validator.is_type(instance, "string") and len(instance) > mL: - yield ValidationError("%r is too long" % (instance,)) + yield ValidationError(f"{instance!r} is too long") -def dependencies(validator, dependencies, instance, schema): +def dependentRequired(validator, dependentRequired, instance, schema): if not validator.is_type(instance, "object"): return - for property, dependency in iteritems(dependencies): + for property, dependency in dependentRequired.items(): if property not in instance: continue - if validator.is_type(dependency, "array"): - for each in dependency: - if each not in instance: - message = "%r is a dependency of %r" - yield ValidationError(message % (each, property)) - else: - for error in validator.descend( - instance, dependency, schema_path=property, - ): - yield error + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + + +def dependentSchemas(validator, dependentSchemas, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependentSchemas.items(): + if property not in instance: + continue + yield from validator.descend( + instance, dependency, schema_path=property, + ) def enum(validator, enums, instance, schema): if instance == 0 or instance == 1: unbooled = unbool(instance) if all(unbooled != unbool(each) for each in enums): - yield ValidationError("%r is not one of %r" % (instance, enums)) + yield ValidationError(f"{instance!r} is not one of {enums!r}") elif instance not in enums: - yield ValidationError("%r is not one of %r" % (instance, enums)) + yield ValidationError(f"{instance!r} is not one of {enums!r}") def ref(validator, ref, instance, schema): resolve = getattr(validator.resolver, 
"resolve", None) if resolve is None: with validator.resolver.resolving(ref) as resolved: - for error in validator.descend(instance, resolved): - yield error + yield from validator.descend(instance, resolved) else: scope, resolved = validator.resolver.resolve(ref) validator.resolver.push_scope(scope) try: - for error in validator.descend(instance, resolved): - yield error + yield from validator.descend(instance, resolved) finally: validator.resolver.pop_scope() +def dynamicRef(validator, dynamicRef, instance, schema): + _, fragment = urldefrag(dynamicRef) + + for url in validator.resolver._scopes_stack: + lookup_url = urljoin(url, dynamicRef) + with validator.resolver.resolving(lookup_url) as subschema: + if ("$dynamicAnchor" in subschema + and fragment == subschema["$dynamicAnchor"]): + yield from validator.descend(instance, subschema) + break + else: + with validator.resolver.resolving(dynamicRef) as subschema: + yield from validator.descend(instance, subschema) + + def type(validator, types, instance, schema): types = ensure_list(types) if not any(validator.is_type(instance, type) for type in types): - yield ValidationError(types_msg(instance, types)) + reprs = ", ".join(repr(type) for type in types) + yield ValidationError(f"{instance!r} is not of type {reprs}") def properties(validator, properties, instance, schema): if not validator.is_type(instance, "object"): return - for property, subschema in iteritems(properties): + for property, subschema in properties.items(): if property in instance: - for error in validator.descend( + yield from validator.descend( instance[property], subschema, path=property, schema_path=property, - ): - yield error + ) def required(validator, required, instance, schema): @@ -293,27 +342,24 @@ def required(validator, required, instance, schema): return for property in required: if property not in instance: - yield ValidationError("%r is a required property" % property) + yield ValidationError(f"{property!r} is a required property") def minProperties(validator, mP, instance, schema): if validator.is_type(instance, "object") and len(instance) < mP: - yield ValidationError( - "%r does not have enough properties" % (instance,) - ) + yield ValidationError(f"{instance!r} does not have enough properties") def maxProperties(validator, mP, instance, schema): if not validator.is_type(instance, "object"): return if validator.is_type(instance, "object") and len(instance) > mP: - yield ValidationError("%r has too many properties" % (instance,)) + yield ValidationError(f"{instance!r} has too many properties") def allOf(validator, allOf, instance, schema): for index, subschema in enumerate(allOf): - for error in validator.descend(instance, subschema, schema_path=index): - yield error + yield from validator.descend(instance, subschema, schema_path=index) def anyOf(validator, anyOf, instance, schema): @@ -325,7 +371,7 @@ def anyOf(validator, anyOf, instance, schema): all_errors.extend(errs) else: yield ValidationError( - "%r is not valid under any of the given schemas" % (instance,), + f"{instance!r} is not valid under any of the given schemas", context=all_errors, ) @@ -341,33 +387,77 @@ def oneOf(validator, oneOf, instance, schema): all_errors.extend(errs) else: yield ValidationError( - "%r is not valid under any of the given schemas" % (instance,), + f"{instance!r} is not valid under any of the given schemas", context=all_errors, ) - more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)] + more_valid = [ + each for _, each in subschemas + if 
validator.evolve(schema=each).is_valid(instance) + ] if more_valid: more_valid.append(first_valid) reprs = ", ".join(repr(schema) for schema in more_valid) - yield ValidationError( - "%r is valid under each of %s" % (instance, reprs) - ) + yield ValidationError(f"{instance!r} is valid under each of {reprs}") def not_(validator, not_schema, instance, schema): - if validator.is_valid(instance, not_schema): - yield ValidationError( - "%r is not allowed for %r" % (not_schema, instance) - ) + if validator.evolve(schema=not_schema).is_valid(instance): + message = f"{instance!r} should not be valid under {not_schema!r}" + yield ValidationError(message) def if_(validator, if_schema, instance, schema): - if validator.is_valid(instance, if_schema): - if u"then" in schema: - then = schema[u"then"] - for error in validator.descend(instance, then, schema_path="then"): - yield error - elif u"else" in schema: - else_ = schema[u"else"] - for error in validator.descend(instance, else_, schema_path="else"): - yield error + if validator.evolve(schema=if_schema).is_valid(instance): + if "then" in schema: + then = schema["then"] + yield from validator.descend(instance, then, schema_path="then") + elif "else" in schema: + else_ = schema["else"] + yield from validator.descend(instance, else_, schema_path="else") + + +def unevaluatedItems(validator, unevaluatedItems, instance, schema): + evaluated_item_indexes = find_evaluated_item_indexes_by_schema( + validator, instance, schema, + ) + unevaluated_items = [ + item for index, item in enumerate(instance) + if index not in evaluated_item_indexes + ] + if unevaluated_items: + error = "Unevaluated items are not allowed (%s %s unexpected)" + yield ValidationError(error % extras_msg(unevaluated_items)) + + +def unevaluatedProperties(validator, unevaluatedProperties, instance, schema): + evaluated_property_keys = find_evaluated_property_keys_by_schema( + validator, instance, schema, + ) + unevaluated_property_keys = [] + for property in instance: + if property not in evaluated_property_keys: + for _ in validator.descend( + instance[property], + unevaluatedProperties, + path=property, + schema_path=property, + ): + unevaluated_property_keys.append(property) + + if unevaluated_property_keys: + error = "Unevaluated properties are not allowed (%s %s unexpected)" + yield ValidationError(error % extras_msg(unevaluated_property_keys)) + + +def prefixItems(validator, prefixItems, instance, schema): + if not validator.is_type(instance, "array"): + return + + for (index, item), subschema in zip(enumerate(instance), prefixItems): + yield from validator.descend( + instance=item, + schema=subschema, + schema_path=index, + path=index, + ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/issue232.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/issue232.py old mode 100755 new mode 100644 index 65e3aed..779f522 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/issue232.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/issue232.py @@ -1,19 +1,18 @@ -#!/usr/bin/env python """ A performance benchmark using the example from issue #232. 
See https://github.com/Julian/jsonschema/pull/232. """ -from twisted.python.filepath import FilePath +from pathlib import Path + from pyperf import Runner from pyrsistent import m from jsonschema.tests._suite import Version import jsonschema - issue232 = Version( - path=FilePath(__file__).sibling("issue232"), + path=Path(__file__).parent / "issue232", remotes=m(), name="issue232", ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py old mode 100755 new mode 100644 index 5add505..905fb6a --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/benchmarks/json_schema_test_suite.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """ A performance benchmark using the official test suite. @@ -9,6 +8,5 @@ from jsonschema.tests._suite import Suite - if __name__ == "__main__": Suite().benchmark(runner=Runner()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/cli.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/cli.py old mode 100755 new mode 100644 index ab3335b..f5f6aef --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/cli.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/cli.py @@ -1,14 +1,133 @@ """ The ``jsonschema`` command line. """ -from __future__ import absolute_import + +from json import JSONDecodeError +from textwrap import dedent import argparse import json import sys +import traceback + +try: + from importlib import metadata +except ImportError: + import importlib_metadata as metadata # type: ignore + +import attr -from jsonschema import __version__ from jsonschema._reflect import namedAny -from jsonschema.validators import validator_for +from jsonschema.exceptions import SchemaError +from jsonschema.validators import RefResolver, validator_for + + +class _CannotLoadFile(Exception): + pass + + +@attr.s +class _Outputter(object): + + _formatter = attr.ib() + _stdout = attr.ib() + _stderr = attr.ib() + + @classmethod + def from_arguments(cls, arguments, stdout, stderr): + if arguments["output"] == "plain": + formatter = _PlainFormatter(arguments["error_format"]) + elif arguments["output"] == "pretty": + formatter = _PrettyFormatter() + return cls(formatter=formatter, stdout=stdout, stderr=stderr) + + def load(self, path): + try: + file = open(path) + except FileNotFoundError: + self.filenotfound_error(path=path, exc_info=sys.exc_info()) + raise _CannotLoadFile() + + with file: + try: + return json.load(file) + except JSONDecodeError: + self.parsing_error(path=path, exc_info=sys.exc_info()) + raise _CannotLoadFile() + + def filenotfound_error(self, **kwargs): + self._stderr.write(self._formatter.filenotfound_error(**kwargs)) + + def parsing_error(self, **kwargs): + self._stderr.write(self._formatter.parsing_error(**kwargs)) + + def validation_error(self, **kwargs): + self._stderr.write(self._formatter.validation_error(**kwargs)) + + def validation_success(self, **kwargs): + self._stdout.write(self._formatter.validation_success(**kwargs)) + + +@attr.s +class _PrettyFormatter(object): + + _ERROR_MSG = dedent( + """\ + ===[{type}]===({path})=== + + {body} + ----------------------------- + """, + ) + _SUCCESS_MSG = 
"===[SUCCESS]===({path})===\n" + + def filenotfound_error(self, path, exc_info): + return self._ERROR_MSG.format( + path=path, + type="FileNotFoundError", + body="{!r} does not exist.".format(path), + ) + + def parsing_error(self, path, exc_info): + exc_type, exc_value, exc_traceback = exc_info + exc_lines = "".join( + traceback.format_exception(exc_type, exc_value, exc_traceback), + ) + return self._ERROR_MSG.format( + path=path, + type=exc_type.__name__, + body=exc_lines, + ) + + def validation_error(self, instance_path, error): + return self._ERROR_MSG.format( + path=instance_path, + type=error.__class__.__name__, + body=error, + ) + + def validation_success(self, instance_path): + return self._SUCCESS_MSG.format(path=instance_path) + + +@attr.s +class _PlainFormatter(object): + + _error_format = attr.ib() + + def filenotfound_error(self, path, exc_info): + return "{!r} does not exist.\n".format(path) + + def parsing_error(self, path, exc_info): + return "Failed to parse {}: {}\n".format( + "" if path == "" else repr(path), + exc_info[1], + ) + + def validation_error(self, instance_path, error): + return self._error_format.format(file_name=instance_path, error=error) + + def validation_success(self, instance_path): + return "" def _namedAnyWithDefault(name): @@ -17,11 +136,6 @@ def _namedAnyWithDefault(name): return namedAny(name) -def _json_file(path): - with open(path) as file: - return json.load(file) - - parser = argparse.ArgumentParser( description="JSON Schema Validation CLI", ) @@ -29,62 +143,142 @@ def _json_file(path): "-i", "--instance", action="append", dest="instances", - type=_json_file, - help=( - "a path to a JSON instance (i.e. filename.json) " - "to validate (may be specified multiple times)" - ), + help=""" + a path to a JSON instance (i.e. filename.json) to validate (may + be specified multiple times). If no instances are provided via this + option, one will be expected on standard input. + """, ) parser.add_argument( "-F", "--error-format", - default="{error.instance}: {error.message}\n", - help=( - "the format to use for each error output message, specified in " - "a form suitable for passing to str.format, which will be called " - "with 'error' for each error" - ), + help=""" + the format to use for each validation error message, specified + in a form suitable for str.format. This string will be passed + one formatted object named 'error' for each ValidationError. + Only provide this option when using --output=plain, which is the + default. If this argument is unprovided and --output=plain is + used, a simple default representation will be used." + """, +) +parser.add_argument( + "-o", "--output", + choices=["plain", "pretty"], + default="plain", + help=""" + an output format to use. 'plain' (default) will produce minimal + text with one line for each error, while 'pretty' will produce + more detailed human-readable output on multiple lines. + """, ) parser.add_argument( "-V", "--validator", type=_namedAnyWithDefault, - help=( - "the fully qualified object name of a validator to use, or, for " - "validators that are registered with jsonschema, simply the name " - "of the class." - ), + help=""" + the fully qualified object name of a validator to use, or, for + validators that are registered with jsonschema, simply the name + of the class. + """, +) +parser.add_argument( + "--base-uri", + help=""" + a base URI to assign to the provided schema, even if it does not + declare one (via e.g. $id). 
This option can be used if you wish to + resolve relative references to a particular URI (or local path) + """, ) parser.add_argument( "--version", action="version", - version=__version__, + version=metadata.version("jsonschema"), ) parser.add_argument( "schema", - help="the JSON Schema to validate with (i.e. schema.json)", - type=_json_file, + help="the path to a JSON Schema to validate with (i.e. schema.json)", ) def parse_args(args): arguments = vars(parser.parse_args(args=args or ["--help"])) - if arguments["validator"] is None: - arguments["validator"] = validator_for(arguments["schema"]) + if arguments["output"] != "plain" and arguments["error_format"]: + raise parser.error( + "--error-format can only be used with --output plain", + ) + if arguments["output"] == "plain" and arguments["error_format"] is None: + arguments["error_format"] = "{error.instance}: {error.message}\n" return arguments +def _validate_instance(instance_path, instance, validator, outputter): + invalid = False + for error in validator.iter_errors(instance): + invalid = True + outputter.validation_error(instance_path=instance_path, error=error) + + if not invalid: + outputter.validation_success(instance_path=instance_path) + return invalid + + def main(args=sys.argv[1:]): sys.exit(run(arguments=parse_args(args=args))) -def run(arguments, stdout=sys.stdout, stderr=sys.stderr): - error_format = arguments["error_format"] - validator = arguments["validator"](schema=arguments["schema"]) +def run(arguments, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin): + outputter = _Outputter.from_arguments( + arguments=arguments, + stdout=stdout, + stderr=stderr, + ) + + try: + schema = outputter.load(arguments["schema"]) + except _CannotLoadFile: + return 1 + + if arguments["validator"] is None: + arguments["validator"] = validator_for(schema) + + try: + arguments["validator"].check_schema(schema) + except SchemaError as error: + outputter.validation_error( + instance_path=arguments["schema"], + error=error, + ) + return 1 + + if arguments["instances"]: + load, instances = outputter.load, arguments["instances"] + else: + def load(_): + try: + return json.load(stdin) + except JSONDecodeError: + outputter.parsing_error( + path="", exc_info=sys.exc_info(), + ) + raise _CannotLoadFile() + instances = [""] + + resolver = RefResolver( + base_uri=arguments["base_uri"], + referrer=schema, + ) if arguments["base_uri"] is not None else None - validator.check_schema(arguments["schema"]) + validator = arguments["validator"](schema, resolver=resolver) + exit_code = 0 + for each in instances: + try: + instance = load(each) + except _CannotLoadFile: + exit_code = 1 + else: + exit_code |= _validate_instance( + instance_path=each, + instance=instance, + validator=validator, + outputter=outputter, + ) - errored = False - for instance in arguments["instances"] or (): - for error in validator.iter_errors(instance): - stderr.write(error_format.format(error=error)) - errored = True - return errored + return exit_code diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/compat.py deleted file mode 100755 index 47e0980..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/compat.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Python 2/3 compatibility helpers. - -Note: This module is *not* public API. 
-""" -import contextlib -import operator -import sys - - -try: - from collections.abc import MutableMapping, Sequence # noqa -except ImportError: - from collections import MutableMapping, Sequence # noqa - -PY3 = sys.version_info[0] >= 3 - -if PY3: - zip = zip - from functools import lru_cache - from io import StringIO as NativeIO - from urllib.parse import ( - unquote, urljoin, urlunsplit, SplitResult, urlsplit - ) - from urllib.request import pathname2url, urlopen - str_types = str, - int_types = int, - iteritems = operator.methodcaller("items") -else: - from itertools import izip as zip # noqa - from io import BytesIO as NativeIO - from urlparse import urljoin, urlunsplit, SplitResult, urlsplit - from urllib import pathname2url, unquote # noqa - import urllib2 # noqa - def urlopen(*args, **kwargs): - return contextlib.closing(urllib2.urlopen(*args, **kwargs)) - - str_types = basestring - int_types = int, long - iteritems = operator.methodcaller("iteritems") - - from functools32 import lru_cache - - -def urldefrag(url): - if "#" in url: - s, n, p, q, frag = urlsplit(url) - defrag = urlunsplit((s, n, p, q, "")) - else: - defrag = url - frag = "" - return defrag, frag - - -# flake8: noqa diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/exceptions.py old mode 100755 new mode 100644 index 691dcff..274e6c5 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/exceptions.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/exceptions.py @@ -1,19 +1,19 @@ """ Validation errors, and some surrounding helpers. """ +from __future__ import annotations + from collections import defaultdict, deque +from pprint import pformat +from textwrap import dedent, indent import itertools -import pprint -import textwrap import attr from jsonschema import _utils -from jsonschema.compat import PY3, iteritems - -WEAK_MATCHES = frozenset(["anyOf", "oneOf"]) -STRONG_MATCHES = frozenset() +WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"]) +STRONG_MATCHES: frozenset[str] = frozenset() _unset = _utils.Unset() @@ -59,40 +59,36 @@ def __init__( error.parent = self def __repr__(self): - return "<%s: %r>" % (self.__class__.__name__, self.message) + return f"<{self.__class__.__name__}: {self.message!r}>" - def __unicode__(self): + def __str__(self): essential_for_verbose = ( self.validator, self.validator_value, self.instance, self.schema, ) if any(m is _unset for m in essential_for_verbose): return self.message - pschema = pprint.pformat(self.schema, width=72) - pinstance = pprint.pformat(self.instance, width=72) - return self.message + textwrap.dedent(""" - - Failed validating %r in %s%s: - %s - - On %s%s: - %s - """.rstrip() - ) % ( - self.validator, - self._word_for_schema_in_error_message, - _utils.format_as_index(list(self.relative_schema_path)[:-1]), - _utils.indent(pschema), - self._word_for_instance_in_error_message, - _utils.format_as_index(self.relative_path), - _utils.indent(pinstance), + schema_path = _utils.format_as_index( + container=self._word_for_schema_in_error_message, + indices=list(self.relative_schema_path)[:-1], ) + instance_path = _utils.format_as_index( + container=self._word_for_instance_in_error_message, + indices=self.relative_path, + ) + prefix = 16 * " " + + return dedent( + f"""\ + {self.message} - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return unicode(self).encode("utf-8") + 
Failed validating {self.validator!r} in {schema_path}: + {indent(pformat(self.schema, width=72), prefix).lstrip()} + + On {instance_path}: + {indent(pformat(self.instance, width=72), prefix).lstrip()} + """.rstrip(), + ) @classmethod def create_from(cls, other): @@ -118,8 +114,18 @@ def absolute_schema_path(self): path.extendleft(reversed(parent.absolute_schema_path)) return path + @property + def json_path(self): + path = "$" + for elem in self.absolute_path: + if isinstance(elem, int): + path += "[" + str(elem) + "]" + else: + path += "." + elem + return path + def _set(self, **kwargs): - for k, v in iteritems(kwargs): + for k, v in kwargs.items(): if getattr(self, k) is _unset: setattr(self, k, v) @@ -169,14 +175,8 @@ class UndefinedTypeCheck(Exception): def __init__(self, type): self.type = type - def __unicode__(self): - return "Type %r is unknown to this type checker" % self.type - - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return unicode(self).encode("utf-8") + def __str__(self): + return f"Type {self.type!r} is unknown to this type checker" class UnknownType(Exception): @@ -189,23 +189,18 @@ def __init__(self, type, instance, schema): self.instance = instance self.schema = schema - def __unicode__(self): - pschema = pprint.pformat(self.schema, width=72) - pinstance = pprint.pformat(self.instance, width=72) - return textwrap.dedent(""" - Unknown type %r for validator with schema: - %s + def __str__(self): + prefix = 16 * " " - While checking instance: - %s - """.rstrip() - ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance)) + return dedent( + f"""\ + Unknown type {self.type!r} for validator with schema: + {indent(pformat(self.schema, width=72), prefix).lstrip()} - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return unicode(self).encode("utf-8") + While checking instance: + {indent(pformat(self.instance, width=72), prefix).lstrip()} + """.rstrip(), + ) class FormatError(Exception): @@ -218,15 +213,9 @@ def __init__(self, message, cause=None): self.message = message self.cause = self.__cause__ = cause - def __unicode__(self): + def __str__(self): return self.message - if PY3: - __str__ = __unicode__ - else: - def __str__(self): - return self.message.encode("utf-8") - class ErrorTree(object): """ @@ -258,10 +247,10 @@ def __getitem__(self, index): """ Retrieve the child tree one level down at the given ``index``. - If the index is not in the instance that this tree corresponds to and - is not known by this tree, whatever error would be raised by - ``instance.__getitem__`` will be propagated (usually this is some - subclass of `exceptions.LookupError`. + If the index is not in the instance that this tree corresponds + to and is not known by this tree, whatever error would be raised + by ``instance.__getitem__`` will be propagated (usually this is + some subclass of `LookupError`. """ if self._instance is not _unset and index not in self: @@ -288,7 +277,7 @@ def __len__(self): return self.total_errors def __repr__(self): - return "<%s (%s total errors)>" % (self.__class__.__name__, len(self)) + return f"<{self.__class__.__name__} ({len(self)} total errors)>" @property def total_errors(self): @@ -296,7 +285,7 @@ def total_errors(self): The total number of errors in the entire tree, including children. 
""" - child_errors = sum(len(tree) for _, tree in iteritems(self._contents)) + child_errors = sum(len(tree) for _, tree in self._contents.items()) return len(self.errors) + child_errors @@ -339,14 +328,14 @@ def best_match(errors, key=relevance): not be relevant. Arguments: - errors (collections.Iterable): + errors (collections.abc.Iterable): the errors to select from. Do not provide a mixture of errors from different validation attempts (i.e. from different instances or schemas), since it won't produce sensical output. - key (collections.Callable): + key (collections.abc.Callable): the key to use when sorting errors. See `relevance` and transitively `by_relevance` for more details (the default is diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/protocols.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/protocols.py new file mode 100644 index 0000000..ea00446 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/protocols.py @@ -0,0 +1,167 @@ +""" +typing.Protocol classes for jsonschema interfaces. +""" + +# for reference material on Protocols, see +# https://www.python.org/dev/peps/pep-0544/ + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, ClassVar, Iterator +import sys + +# doing these imports with `try ... except ImportError` doesn't pass mypy +# checking because mypy sees `typing._SpecialForm` and +# `typing_extensions._SpecialForm` as incompatible +# +# see: +# https://mypy.readthedocs.io/en/stable/runtime_troubles.html#using-new-additions-to-the-typing-module +# https://github.com/python/mypy/issues/4427 +if sys.version_info >= (3, 8): + from typing import Protocol, runtime_checkable +else: + from typing_extensions import Protocol, runtime_checkable + +# in order for Sphinx to resolve references accurately from type annotations, +# it needs to see names like `jsonschema.TypeChecker` +# therefore, only import at type-checking time (to avoid circular references), +# but use `jsonschema` for any types which will otherwise not be resolvable +if TYPE_CHECKING: + import jsonschema + +from jsonschema.exceptions import ValidationError +from jsonschema.validators import RefResolver + +# For code authors working on the validator protocol, these are the three +# use-cases which should be kept in mind: +# +# 1. As a protocol class, it can be used in type annotations to describe the +# available methods and attributes of a validator +# 2. It is the source of autodoc for the validator documentation +# 3. It is runtime_checkable, meaning that it can be used in isinstance() +# checks. +# +# Since protocols are not base classes, isinstance() checking is limited in +# its capabilities. See docs on runtime_checkable for detail + + +@runtime_checkable +class Validator(Protocol): + """ + The protocol to which all validator classes should adhere. + + :argument schema: the schema that the validator object + will validate with. It is assumed to be valid, and providing + an invalid schema can lead to undefined behavior. See + `Validator.check_schema` to validate a schema first. + :argument resolver: an instance of `jsonschema.RefResolver` that will be + used to resolve :validator:`$ref` properties (JSON references). If + unprovided, one will be created. 
+ :argument format_checker: an instance of `jsonschema.FormatChecker` + whose `jsonschema.FormatChecker.conforms` method will be called to + check and see if instances conform to each :validator:`format` + property present in the schema. If unprovided, no validation + will be done for :validator:`format`. Certain formats require + additional packages to be installed (ipv5, uri, color, date-time). + The required packages can be found at the bottom of this page. + """ + + #: An object representing the validator's meta schema (the schema that + #: describes valid schemas in the given version). + META_SCHEMA: ClassVar[dict] + + #: A mapping of validator names (`str`\s) to functions + #: that validate the validator property with that name. For more + #: information see `creating-validators`. + VALIDATORS: ClassVar[dict] + + #: A `jsonschema.TypeChecker` that will be used when validating + #: :validator:`type` properties in JSON schemas. + TYPE_CHECKER: ClassVar[jsonschema.TypeChecker] + + #: The schema that was passed in when initializing the object. + schema: dict | bool + + def __init__( + self, + schema: dict | bool, + resolver: RefResolver | None = None, + format_checker: jsonschema.FormatChecker | None = None, + ) -> None: + ... + + @classmethod + def check_schema(cls, schema: dict) -> None: + """ + Validate the given schema against the validator's `META_SCHEMA`. + + :raises: `jsonschema.exceptions.SchemaError` if the schema + is invalid + """ + + def is_type(self, instance: Any, type: str) -> bool: + """ + Check if the instance is of the given (JSON Schema) type. + + :type type: str + :rtype: bool + :raises: `jsonschema.exceptions.UnknownType` if ``type`` + is not a known type. + """ + + def is_valid(self, instance: dict) -> bool: + """ + Check if the instance is valid under the current `schema`. + + :rtype: bool + + >>> schema = {"maxItems" : 2} + >>> Draft3Validator(schema).is_valid([2, 3, 4]) + False + """ + + def iter_errors(self, instance: dict) -> Iterator[ValidationError]: + r""" + Lazily yield each of the validation errors in the given instance. + + :rtype: an `collections.abc.Iterable` of + `jsonschema.exceptions.ValidationError`\s + + >>> schema = { + ... "type" : "array", + ... "items" : {"enum" : [1, 2, 3]}, + ... "maxItems" : 2, + ... } + >>> v = Draft3Validator(schema) + >>> for error in sorted(v.iter_errors([2, 3, 4]), key=str): + ... print(error.message) + 4 is not one of [1, 2, 3] + [2, 3, 4] is too long + """ + + def validate(self, instance: dict) -> None: + """ + Check if the instance is valid under the current `schema`. + + :raises: `jsonschema.exceptions.ValidationError` if the + instance is invalid + + >>> schema = {"maxItems" : 2} + >>> Draft3Validator(schema).validate([2, 3, 4]) + Traceback (most recent call last): + ... + ValidationError: [2, 3, 4] is too long + """ + + def evolve(self, **kwargs) -> "Validator": + """ + Create a new validator like this one, but with given changes. + + Preserves all other attributes, so can be used to e.g. create a + validator with a different schema but with the same :validator:`$ref` + resolution behavior. 
+ + >>> validator = Draft202012Validator({}) + >>> validator.evolve(schema={"type": "number"}) + Draft202012Validator(schema={'type': 'number'}, format_checker=None) + """ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2019-09.json b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2019-09.json new file mode 100644 index 0000000..2248a0c --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2019-09.json @@ -0,0 +1,42 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2020-12.json b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2020-12.json new file mode 100644 index 0000000..d5e2d31 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft2020-12.json @@ -0,0 +1,58 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in 
order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft3.json b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft3.json index f8a09c5..23d59b6 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft3.json +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft3.json @@ -1,199 +1,177 @@ { - "$schema": "http://json-schema.org/draft-03/schema#", - "dependencies": { - "exclusiveMaximum": "maximum", - "exclusiveMinimum": "minimum" - }, - "id": "http://json-schema.org/draft-03/schema#", - "properties": { - "$ref": { - "format": "uri", - "type": "string" - }, - "$schema": { - "format": "uri", - "type": "string" - }, - "additionalItems": { - "default": {}, - "type": [ - { - "$ref": "#" - }, - "boolean" - ] - }, - "additionalProperties": { - "default": {}, - "type": [ - { - "$ref": "#" - }, - "boolean" - ] - }, - "default": { - "type": "any" - }, - "dependencies": { - "additionalProperties": { - "items": { - "type": "string" - }, - "type": [ - "string", - "array", - { - "$ref": "#" - } - ] - }, - "default": {}, - "type": [ - "string", - "array", - "object" - ] - }, - "description": { - "type": "string" - }, - "disallow": { - "items": { - "type": [ - "string", - { - "$ref": "#" - } - ] - }, - "type": [ - "string", - "array" - ], - "uniqueItems": true - }, - "divisibleBy": { - "default": 1, - "exclusiveMinimum": true, - "minimum": 0, - "type": "number" - }, - "enum": { - "type": "array" - }, - "exclusiveMaximum": { - "default": false, - "type": "boolean" - }, - "exclusiveMinimum": { - "default": false, - "type": "boolean" - }, - "extends": { - "default": {}, - "items": { - "$ref": "#" - }, - "type": [ - { - "$ref": "#" - }, - "array" - ] - }, - "format": { - "type": "string" - }, - "id": { - "format": "uri", - "type": "string" - }, - "items": { - "default": {}, - "items": { - "$ref": "#" - }, - "type": [ - { - "$ref": "#" - }, - "array" - ] - }, - "maxDecimal": { - "minimum": 0, - "type": "number" - }, - "maxItems": { - "minimum": 0, - "type": "integer" - }, - "maxLength": { - "type": "integer" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "default": 0, - "minimum": 0, - "type": "integer" - }, - "minLength": { - "default": 0, - "minimum": 0, - "type": "integer" - }, - "minimum": { - "type": "number" - }, - "pattern": { - "format": "regex", - "type": "string" - }, - "patternProperties": { - "additionalProperties": { - "$ref": 
"#" - }, - "default": {}, - "type": "object" - }, - "properties": { - "additionalProperties": { - "$ref": "#", - "type": "object" - }, - "default": {}, - "type": "object" - }, - "required": { - "default": false, - "type": "boolean" - }, - "title": { - "type": "string" - }, - "type": { - "default": "any", - "items": { - "type": [ - "string", - { - "$ref": "#" - } - ] - }, - "type": [ - "string", - "array" - ], - "uniqueItems": true - }, - "uniqueItems": { - "default": false, - "type": "boolean" - } - }, - "type": "object" + "$schema" : "http://json-schema.org/draft-03/schema#", + "id" : "http://json-schema.org/draft-03/schema#", + "type" : "object", + + "properties" : { + "type" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true, + "default" : "any" + }, + + "properties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#", "type" : "object"}, + "default" : {} + }, + + "patternProperties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalProperties" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "items" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalItems" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "required" : { + "type" : "boolean", + "default" : false + }, + + "dependencies" : { + "type" : ["string", "array", "object"], + "additionalProperties" : { + "type" : ["string", "array", {"$ref" : "#"}], + "items" : { + "type" : "string" + } + }, + "default" : {} + }, + + "minimum" : { + "type" : "number" + }, + + "maximum" : { + "type" : "number" + }, + + "exclusiveMinimum" : { + "type" : "boolean", + "default" : false + }, + + "exclusiveMaximum" : { + "type" : "boolean", + "default" : false + }, + + "maxDecimal": { + "minimum": 0, + "type": "number" + }, + + "minItems" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxItems" : { + "type" : "integer", + "minimum" : 0 + }, + + "uniqueItems" : { + "type" : "boolean", + "default" : false + }, + + "pattern" : { + "type" : "string", + "format" : "regex" + }, + + "minLength" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxLength" : { + "type" : "integer" + }, + + "enum" : { + "type" : "array" + }, + + "default" : { + "type" : "any" + }, + + "title" : { + "type" : "string" + }, + + "description" : { + "type" : "string" + }, + + "format" : { + "type" : "string" + }, + + "divisibleBy" : { + "type" : "number", + "minimum" : 0, + "exclusiveMinimum" : true, + "default" : 1 + }, + + "disallow" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true + }, + + "extends" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "id" : { + "type" : "string", + "format" : "uri" + }, + + "$ref" : { + "type" : "string", + "format" : "uri" + }, + + "$schema" : { + "type" : "string", + "format" : "uri" + } + }, + + "dependencies" : { + "exclusiveMinimum" : "minimum", + "exclusiveMaximum" : "maximum" + }, + + "default" : {} } diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft4.json b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft4.json index 9b666cf..ba0c117 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft4.json +++ 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/draft4.json @@ -1,222 +1,149 @@ { + "id": "http://json-schema.org/draft-04/schema#", "$schema": "http://json-schema.org/draft-04/schema#", - "default": {}, + "description": "Core schema meta-schema", "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, "positiveInteger": { - "minimum": 0, - "type": "integer" + "type": "integer", + "minimum": 0 }, "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "schemaArray": { - "items": { - "$ref": "#" - }, - "minItems": 1, - "type": "array" + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] }, "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] }, "stringArray": { - "items": { - "type": "string" - }, - "minItems": 1, "type": "array", + "items": { "type": "string" }, + "minItems": 1, "uniqueItems": true } }, - "dependencies": { - "exclusiveMaximum": [ - "maximum" - ], - "exclusiveMinimum": [ - "minimum" - ] - }, - "description": "Core schema meta-schema", - "id": "http://json-schema.org/draft-04/schema#", + "type": "object", "properties": { - "$schema": { + "id": { "format": "uri", "type": "string" }, - "additionalItems": { - "anyOf": [ - { - "type": "boolean" - }, - { - "$ref": "#" - } - ], - "default": {} - }, - "additionalProperties": { - "anyOf": [ - { - "type": "boolean" - }, - { - "$ref": "#" - } - ], - "default": {} - }, - "allOf": { - "$ref": "#/definitions/schemaArray" - }, - "anyOf": { - "$ref": "#/definitions/schemaArray" - }, - "default": {}, - "definitions": { - "additionalProperties": { - "$ref": "#" - }, - "default": {}, - "type": "object" + "$schema": { + "type": "string", + "format": "uri" }, - "dependencies": { - "additionalProperties": { - "anyOf": [ - { - "$ref": "#" - }, - { - "$ref": "#/definitions/stringArray" - } - ] - }, - "type": "object" + "title": { + "type": "string" }, "description": { "type": "string" }, - "enum": { - "type": "array" + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" }, "exclusiveMaximum": { - "default": false, - "type": "boolean" + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" }, "exclusiveMinimum": { - "default": false, - "type": "boolean" + "type": "boolean", + "default": false }, - "format": { - "type": "string" + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" }, - "id": { - "format": "uri", - "type": "string" + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} }, "items": { "anyOf": [ - { - "$ref": "#" - }, - { - "$ref": "#/definitions/schemaArray" - } + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } ], "default": {} }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "maxProperties": { - "$ref": "#/definitions/positiveInteger" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "minProperties": { - "$ref": 
"#/definitions/positiveIntegerDefault0" - }, - "minimum": { - "type": "number" - }, - "multipleOf": { - "exclusiveMinimum": true, - "minimum": 0, - "type": "number" + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false }, - "not": { - "$ref": "#" + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} }, - "oneOf": { - "$ref": "#/definitions/schemaArray" + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} }, - "pattern": { - "format": "regex", - "type": "string" + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} }, "patternProperties": { - "additionalProperties": { - "$ref": "#" - }, - "default": {}, - "type": "object" + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} }, - "properties": { + "dependencies": { + "type": "object", "additionalProperties": { - "$ref": "#" - }, - "default": {}, - "type": "object" - }, - "required": { - "$ref": "#/definitions/stringArray" + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } }, - "title": { - "type": "string" + "enum": { + "type": "array" }, "type": { "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, { - "$ref": "#/definitions/simpleTypes" - }, - { - "items": { - "$ref": "#/definitions/simpleTypes" - }, - "minItems": 1, "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, "uniqueItems": true } ] }, - "uniqueItems": { - "default": false, - "type": "boolean" - } + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] }, - "type": "object" + "default": {} } diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/vocabularies.json b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/vocabularies.json new file mode 100644 index 0000000..bca1705 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/schemas/vocabularies.json @@ -0,0 +1 @@ +{"https://json-schema.org/draft/2020-12/meta/content": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/content", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/content": true}, "$dynamicAnchor": "meta", "title": "Content vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"contentEncoding": {"type": "string"}, "contentMediaType": {"type": "string"}, "contentSchema": {"$dynamicRef": "#meta"}}}, "https://json-schema.org/draft/2020-12/meta/unevaluated": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/unevaluated": true}, "$dynamicAnchor": "meta", "title": "Unevaluated applicator vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"unevaluatedItems": 
{"$dynamicRef": "#meta"}, "unevaluatedProperties": {"$dynamicRef": "#meta"}}}, "https://json-schema.org/draft/2020-12/meta/format-annotation": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/format-annotation": true}, "$dynamicAnchor": "meta", "title": "Format vocabulary meta-schema for annotation results", "type": ["object", "boolean"], "properties": {"format": {"type": "string"}}}, "https://json-schema.org/draft/2020-12/meta/applicator": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/applicator", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/applicator": true}, "$dynamicAnchor": "meta", "title": "Applicator vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"prefixItems": {"$ref": "#/$defs/schemaArray"}, "items": {"$dynamicRef": "#meta"}, "contains": {"$dynamicRef": "#meta"}, "additionalProperties": {"$dynamicRef": "#meta"}, "properties": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}, "default": {}}, "patternProperties": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}, "propertyNames": {"format": "regex"}, "default": {}}, "dependentSchemas": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}, "default": {}}, "propertyNames": {"$dynamicRef": "#meta"}, "if": {"$dynamicRef": "#meta"}, "then": {"$dynamicRef": "#meta"}, "else": {"$dynamicRef": "#meta"}, "allOf": {"$ref": "#/$defs/schemaArray"}, "anyOf": {"$ref": "#/$defs/schemaArray"}, "oneOf": {"$ref": "#/$defs/schemaArray"}, "not": {"$dynamicRef": "#meta"}}, "$defs": {"schemaArray": {"type": "array", "minItems": 1, "items": {"$dynamicRef": "#meta"}}}}, "https://json-schema.org/draft/2020-12/meta/meta-data": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/meta-data": true}, "$dynamicAnchor": "meta", "title": "Meta-data vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"title": {"type": "string"}, "description": {"type": "string"}, "default": true, "deprecated": {"type": "boolean", "default": false}, "readOnly": {"type": "boolean", "default": false}, "writeOnly": {"type": "boolean", "default": false}, "examples": {"type": "array", "items": true}}}, "https://json-schema.org/draft/2020-12/meta/core": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/core", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/core": true}, "$dynamicAnchor": "meta", "title": "Core vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"$id": {"$ref": "#/$defs/uriReferenceString", "$comment": "Non-empty fragments not allowed.", "pattern": "^[^#]*#?$"}, "$schema": {"$ref": "#/$defs/uriString"}, "$ref": {"$ref": "#/$defs/uriReferenceString"}, "$anchor": {"$ref": "#/$defs/anchorString"}, "$dynamicRef": {"$ref": "#/$defs/uriReferenceString"}, "$dynamicAnchor": {"$ref": "#/$defs/anchorString"}, "$vocabulary": {"type": "object", "propertyNames": {"$ref": "#/$defs/uriString"}, "additionalProperties": {"type": "boolean"}}, "$comment": {"type": "string"}, "$defs": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}}}, "$defs": {"anchorString": {"type": "string", "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$"}, "uriString": 
{"type": "string", "format": "uri"}, "uriReferenceString": {"type": "string", "format": "uri-reference"}}}, "https://json-schema.org/draft/2020-12/meta/validation": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/validation", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/validation": true}, "$dynamicAnchor": "meta", "title": "Validation vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"type": {"anyOf": [{"$ref": "#/$defs/simpleTypes"}, {"type": "array", "items": {"$ref": "#/$defs/simpleTypes"}, "minItems": 1, "uniqueItems": true}]}, "const": true, "enum": {"type": "array", "items": true}, "multipleOf": {"type": "number", "exclusiveMinimum": 0}, "maximum": {"type": "number"}, "exclusiveMaximum": {"type": "number"}, "minimum": {"type": "number"}, "exclusiveMinimum": {"type": "number"}, "maxLength": {"$ref": "#/$defs/nonNegativeInteger"}, "minLength": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "pattern": {"type": "string", "format": "regex"}, "maxItems": {"$ref": "#/$defs/nonNegativeInteger"}, "minItems": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "uniqueItems": {"type": "boolean", "default": false}, "maxContains": {"$ref": "#/$defs/nonNegativeInteger"}, "minContains": {"$ref": "#/$defs/nonNegativeInteger", "default": 1}, "maxProperties": {"$ref": "#/$defs/nonNegativeInteger"}, "minProperties": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "required": {"$ref": "#/$defs/stringArray"}, "dependentRequired": {"type": "object", "additionalProperties": {"$ref": "#/$defs/stringArray"}}}, "$defs": {"nonNegativeInteger": {"type": "integer", "minimum": 0}, "nonNegativeIntegerDefault0": {"$ref": "#/$defs/nonNegativeInteger", "default": 0}, "simpleTypes": {"enum": ["array", "boolean", "integer", "null", "number", "object", "string"]}, "stringArray": {"type": "array", "items": {"type": "string"}, "uniqueItems": true, "default": []}}}, "https://json-schema.org/draft/2019-09/meta/content": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/content", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/content": true}, "$recursiveAnchor": true, "title": "Content vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"contentMediaType": {"type": "string"}, "contentEncoding": {"type": "string"}, "contentSchema": {"$recursiveRef": "#"}}}, "https://json-schema.org/draft/2019-09/meta/applicator": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/applicator", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/applicator": true}, "$recursiveAnchor": true, "title": "Applicator vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"additionalItems": {"$recursiveRef": "#"}, "unevaluatedItems": {"$recursiveRef": "#"}, "items": {"anyOf": [{"$recursiveRef": "#"}, {"$ref": "#/$defs/schemaArray"}]}, "contains": {"$recursiveRef": "#"}, "additionalProperties": {"$recursiveRef": "#"}, "unevaluatedProperties": {"$recursiveRef": "#"}, "properties": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}, "default": {}}, "patternProperties": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}, "propertyNames": {"format": "regex"}, "default": {}}, "dependentSchemas": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}}, "propertyNames": {"$recursiveRef": "#"}, "if": {"$recursiveRef": "#"}, "then": 
{"$recursiveRef": "#"}, "else": {"$recursiveRef": "#"}, "allOf": {"$ref": "#/$defs/schemaArray"}, "anyOf": {"$ref": "#/$defs/schemaArray"}, "oneOf": {"$ref": "#/$defs/schemaArray"}, "not": {"$recursiveRef": "#"}}, "$defs": {"schemaArray": {"type": "array", "minItems": 1, "items": {"$recursiveRef": "#"}}}}, "https://json-schema.org/draft/2019-09/meta/meta-data": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/meta-data": true}, "$recursiveAnchor": true, "title": "Meta-data vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"title": {"type": "string"}, "description": {"type": "string"}, "default": true, "deprecated": {"type": "boolean", "default": false}, "readOnly": {"type": "boolean", "default": false}, "writeOnly": {"type": "boolean", "default": false}, "examples": {"type": "array", "items": true}}}, "https://json-schema.org/draft/2019-09/meta/core": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/core", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/core": true}, "$recursiveAnchor": true, "title": "Core vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"$id": {"type": "string", "format": "uri-reference", "$comment": "Non-empty fragments not allowed.", "pattern": "^[^#]*#?$"}, "$schema": {"type": "string", "format": "uri"}, "$anchor": {"type": "string", "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$"}, "$ref": {"type": "string", "format": "uri-reference"}, "$recursiveRef": {"type": "string", "format": "uri-reference"}, "$recursiveAnchor": {"type": "boolean", "default": false}, "$vocabulary": {"type": "object", "propertyNames": {"type": "string", "format": "uri"}, "additionalProperties": {"type": "boolean"}}, "$comment": {"type": "string"}, "$defs": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}, "default": {}}}}, "https://json-schema.org/draft/2019-09/meta/validation": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/validation", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/validation": true}, "$recursiveAnchor": true, "title": "Validation vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"multipleOf": {"type": "number", "exclusiveMinimum": 0}, "maximum": {"type": "number"}, "exclusiveMaximum": {"type": "number"}, "minimum": {"type": "number"}, "exclusiveMinimum": {"type": "number"}, "maxLength": {"$ref": "#/$defs/nonNegativeInteger"}, "minLength": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "pattern": {"type": "string", "format": "regex"}, "maxItems": {"$ref": "#/$defs/nonNegativeInteger"}, "minItems": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "uniqueItems": {"type": "boolean", "default": false}, "maxContains": {"$ref": "#/$defs/nonNegativeInteger"}, "minContains": {"$ref": "#/$defs/nonNegativeInteger", "default": 1}, "maxProperties": {"$ref": "#/$defs/nonNegativeInteger"}, "minProperties": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "required": {"$ref": "#/$defs/stringArray"}, "dependentRequired": {"type": "object", "additionalProperties": {"$ref": "#/$defs/stringArray"}}, "const": true, "enum": {"type": "array", "items": true}, "type": {"anyOf": [{"$ref": "#/$defs/simpleTypes"}, {"type": "array", "items": {"$ref": "#/$defs/simpleTypes"}, "minItems": 1, "uniqueItems": true}]}}, "$defs": {"nonNegativeInteger": 
{"type": "integer", "minimum": 0}, "nonNegativeIntegerDefault0": {"$ref": "#/$defs/nonNegativeInteger", "default": 0}, "simpleTypes": {"enum": ["array", "boolean", "integer", "null", "number", "object", "string"]}, "stringArray": {"type": "array", "items": {"type": "string"}, "uniqueItems": true, "default": []}}}, "https://json-schema.org/draft/2019-09/meta/hyper-schema": {"$schema": "https://json-schema.org/draft/2019-09/hyper-schema", "$id": "https://json-schema.org/draft/2019-09/meta/hyper-schema", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/hyper-schema": true}, "$recursiveAnchor": true, "title": "JSON Hyper-Schema Vocabulary Schema", "type": ["object", "boolean"], "properties": {"base": {"type": "string", "format": "uri-template"}, "links": {"type": "array", "items": {"$ref": "https://json-schema.org/draft/2019-09/links"}}}, "links": [{"rel": "self", "href": "{+%24id}"}]}, "https://json-schema.org/draft/2019-09/meta/format": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/format", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/format": true}, "$recursiveAnchor": true, "title": "Format vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"format": {"type": "string"}}}} diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_helpers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_helpers.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_suite.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_suite.py old mode 100755 new mode 100644 index b68a7b6..870304d --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_suite.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/_suite.py @@ -3,6 +3,7 @@ """ from functools import partial +from pathlib import Path import json import os import re @@ -10,21 +11,19 @@ import sys import unittest -from twisted.python.filepath import FilePath import attr -from jsonschema.compat import PY3 -from jsonschema.validators import validators +from jsonschema.validators import _VALIDATORS import jsonschema def _find_suite(): root = os.environ.get("JSON_SCHEMA_TEST_SUITE") if root is not None: - return FilePath(root) + return Path(root) - root = FilePath(jsonschema.__file__).parent().sibling("json") - if not root.isdir(): # pragma: no cover + root = Path(jsonschema.__file__).parent.parent / "json" + if not root.is_dir(): # pragma: no cover raise ValueError( ( "Can't find the JSON-Schema-Test-Suite directory. 
" @@ -42,23 +41,26 @@ class Suite(object): _root = attr.ib(default=attr.Factory(_find_suite)) def _remotes(self): - jsonschema_suite = self._root.descendant(["bin", "jsonschema_suite"]) + jsonschema_suite = self._root.joinpath("bin", "jsonschema_suite") remotes = subprocess.check_output( - [sys.executable, jsonschema_suite.path, "remotes"], + [sys.executable, str(jsonschema_suite), "remotes"], ) return { - "http://localhost:1234/" + name: schema + "http://localhost:1234/" + name.replace("\\", "/"): schema for name, schema in json.loads(remotes.decode("utf-8")).items() } def benchmark(self, runner): # pragma: no cover - for name in validators: - self.version(name=name).benchmark(runner=runner) + for name, Validator in _VALIDATORS.items(): + self.version(name=name).benchmark( + runner=runner, + Validator=Validator, + ) def version(self, name): return Version( name=name, - path=self._root.descendant(["tests", name]), + path=self._root.joinpath("tests", name), remotes=self._remotes(), ) @@ -82,38 +84,32 @@ def benchmark(self, runner, **kwargs): # pragma: no cover def tests(self): return ( test - for child in self._path.globChildren("*.json") + for child in self._path.glob("*.json") for test in self._tests_in( - subject=child.basename()[:-5], + subject=child.name[:-5], path=child, ) ) def format_tests(self): - path = self._path.descendant(["optional", "format"]) + path = self._path.joinpath("optional", "format") return ( test - for child in path.globChildren("*.json") + for child in path.glob("*.json") for test in self._tests_in( - subject=child.basename()[:-5], + subject=child.name[:-5], path=child, ) ) - def tests_of(self, name): - return self._tests_in( - subject=name, - path=self._path.child(name + ".json"), - ) - def optional_tests_of(self, name): return self._tests_in( subject=name, - path=self._path.descendant(["optional", name + ".json"]), + path=self._path.joinpath("optional", name + ".json"), ) def to_unittest_testcase(self, *suites, **kwargs): - name = kwargs.pop("name", "Test" + self.name.title()) + name = kwargs.pop("name", "Test" + self.name.title().replace("-", "")) methods = { test.method_name: test.to_unittest_method(**kwargs) for suite in suites @@ -133,7 +129,7 @@ def to_unittest_testcase(self, *suites, **kwargs): return cls def _tests_in(self, subject, path): - for each in json.loads(path.getContent().decode("utf-8")): + for each in json.loads(path.read_text(encoding="utf-8")): yield ( _Test( version=self, @@ -141,7 +137,7 @@ def _tests_in(self, subject, path): case_description=each["description"], schema=each["schema"], remotes=self._remotes, - **test + **test, ) for test in each["tests"] ) @@ -162,6 +158,8 @@ class _Test(object): _remotes = attr.ib() + comment = attr.ib(default=None) + def __repr__(self): # pragma: no cover return "".format(self.fully_qualified_name) @@ -173,22 +171,18 @@ def fully_qualified_name(self): # pragma: no cover self.subject, self.case_description, self.description, - ] + ], ) @property def method_name(self): delimiters = r"[\W\- ]+" - name = "test_%s_%s_%s" % ( + return "test_{}_{}_{}".format( re.sub(delimiters, "_", self.subject), re.sub(delimiters, "_", self.case_description), re.sub(delimiters, "_", self.description), ) - if not PY3: # pragma: no cover - name = name.encode("utf-8") - return name - def to_unittest_method(self, skip=lambda test: None, **kwargs): if self.valid: def fn(this): @@ -208,13 +202,8 @@ def validate(self, Validator, **kwargs): store=self._remotes, id_of=Validator.ID_OF, ) - jsonschema.validate( - instance=self.data, - 
schema=self.schema, - cls=Validator, - resolver=resolver, - **kwargs - ) + validator = Validator(schema=self.schema, resolver=resolver, **kwargs) + validator.validate(instance=self.data) def validate_ignoring_errors(self, Validator): # pragma: no cover try: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/fuzz_validate.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/fuzz_validate.py new file mode 100644 index 0000000..a8c62ac --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/fuzz_validate.py @@ -0,0 +1,49 @@ +""" +Fuzzing setup for OSS-Fuzz. + +See https://github.com/google/oss-fuzz/tree/master/projects/jsonschema for the +other half of the setup here. +""" +import sys + +from hypothesis import given, strategies + +import jsonschema + +PRIM = strategies.one_of( + strategies.booleans(), + strategies.integers(), + strategies.floats(allow_nan=False, allow_infinity=False), + strategies.text(), +) +DICT = strategies.recursive( + base=strategies.one_of( + strategies.booleans(), + strategies.dictionaries(strategies.text(), PRIM), + ), + extend=lambda inner: strategies.dictionaries(strategies.text(), inner), +) + + +@given(obj1=DICT, obj2=DICT) +def test_schemas(obj1, obj2): + try: + jsonschema.validate(instance=obj1, schema=obj2) + except jsonschema.exceptions.ValidationError: + pass + except jsonschema.exceptions.SchemaError: + pass + + +def main(): + atheris.Setup( + sys.argv, + test_schemas.hypothesis.fuzz_one_input, + enable_python_coverage=True, + ) + atheris.Fuzz() + + +if __name__ == "__main__": + import atheris + main() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_cli.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_cli.py old mode 100755 new mode 100644 index ed820ba..6d42af6 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_cli.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_cli.py @@ -1,11 +1,29 @@ +from contextlib import redirect_stderr, redirect_stdout +from io import StringIO +from json import JSONDecodeError +from pathlib import Path +from textwrap import dedent from unittest import TestCase import json +import os import subprocess import sys +import tempfile -from jsonschema import Draft4Validator, ValidationError, cli, __version__ -from jsonschema.compat import NativeIO -from jsonschema.exceptions import SchemaError +try: # pragma: no cover + from importlib import metadata +except ImportError: # pragma: no cover + import importlib_metadata as metadata # type: ignore + +from pyrsistent import m + +from jsonschema import Draft4Validator, Draft202012Validator, cli +from jsonschema.exceptions import ( + RefResolutionError, + SchemaError, + ValidationError, +) +from jsonschema.validators import _LATEST_VERSION, validate def fake_validator(*errors): @@ -18,41 +36,812 @@ def __init__(self, *args, **kwargs): def iter_errors(self, instance): if errors: return errors.pop() - return [] + return [] # pragma: no cover + @classmethod def check_schema(self, schema): pass return FakeValidator +def fake_open(all_contents): + def open(path): + contents = all_contents.get(path) + if contents is None: + raise FileNotFoundError(path) + return StringIO(contents) + return open + + +def _message_for(non_json): + try: + json.loads(non_json) + except JSONDecodeError as error: + return 
str(error) + else: # pragma: no cover + raise RuntimeError("Tried and failed to capture a JSON dump error.") + + +class TestCLI(TestCase): + def run_cli( + self, argv, files=m(), stdin=StringIO(), exit_code=0, **override, + ): + arguments = cli.parse_args(argv) + arguments.update(override) + + self.assertFalse(hasattr(cli, "open")) + cli.open = fake_open(files) + try: + stdout, stderr = StringIO(), StringIO() + actual_exit_code = cli.run( + arguments, + stdin=stdin, + stdout=stdout, + stderr=stderr, + ) + finally: + del cli.open + + self.assertEqual( + actual_exit_code, exit_code, msg=dedent( + """ + Expected an exit code of {} != {}. + + stdout: {} + + stderr: {} + """.format( + exit_code, + actual_exit_code, + stdout.getvalue(), + stderr.getvalue(), + ), + ), + ) + return stdout.getvalue(), stderr.getvalue() + + def assertOutputs(self, stdout="", stderr="", **kwargs): + self.assertEqual( + self.run_cli(**kwargs), + (dedent(stdout), dedent(stderr)), + ) + + def test_invalid_instance(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="12: I am an error!\n", + ) + + def test_invalid_instance_pretty_output(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["-i", "some_instance", "--output", "pretty", "some_schema"], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_instance)=== + + I am an error! 
+ ----------------------------- + """, + ) + + def test_invalid_instance_explicit_plain_output(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["--output", "plain", "-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="12: I am an error!\n", + ) + + def test_invalid_instance_multiple_errors(self): + instance = 12 + first = ValidationError("First error", instance=instance) + second = ValidationError("Second error", instance=instance) + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(instance), + ), + validator=fake_validator([first, second]), + + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 12: First error + 12: Second error + """, + ) + + def test_invalid_instance_multiple_errors_pretty_output(self): + instance = 12 + first = ValidationError("First error", instance=instance) + second = ValidationError("Second error", instance=instance) + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(instance), + ), + validator=fake_validator([first, second]), + + argv=["-i", "some_instance", "--output", "pretty", "some_schema"], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_instance)=== + + First error + ----------------------------- + ===[ValidationError]===(some_instance)=== + + Second error + ----------------------------- + """, + ) + + def test_multiple_invalid_instances(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + 12: An error + 12: Another error + foo: BOOM + """, + ) + + def test_multiple_invalid_instances_pretty_output(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "--output", "pretty", + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_first_instance)=== + + An error + ----------------------------- + ===[ValidationError]===(some_first_instance)=== + + Another error + ----------------------------- + ===[ValidationError]===(some_second_instance)=== + + BOOM + ----------------------------- + """, + ) + + def test_custom_error_format(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", 
instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "--error-format", ":{error.message}._-_.{error.instance}:", + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr=":An error._-_.12::Another error._-_.12::BOOM._-_.foo:", + ) + + def test_invalid_schema(self): + self.assertOutputs( + files=dict(some_schema='{"type": 12}'), + argv=["some_schema"], + + exit_code=1, + stderr="""\ + 12: 12 is not valid under any of the given schemas + """, + ) + + def test_invalid_schema_pretty_output(self): + schema = {"type": 12} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance="") + error = str(e.exception) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_schema_multiple_errors(self): + self.assertOutputs( + files=dict(some_schema='{"type": 12, "items": 57}'), + argv=["some_schema"], + + exit_code=1, + stderr="""\ + 57: 57 is not of type 'object', 'boolean' + """, + ) + + def test_invalid_schema_multiple_errors_pretty_output(self): + schema = {"type": 12, "items": 57} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance="") + error = str(e.exception) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_schema_with_invalid_instance(self): + """ + "Validating" an instance that's invalid under an invalid schema + just shows the schema error. 
+ """ + self.assertOutputs( + files=dict( + some_schema='{"type": 12, "minimum": 30}', + some_instance="13", + ), + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 12: 12 is not valid under any of the given schemas + """, + ) + + def test_invalid_schema_with_invalid_instance_pretty_output(self): + instance, schema = 13, {"type": 12, "minimum": 30} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance=instance) + error = str(e.exception) + + self.assertOutputs( + files=dict( + some_schema=json.dumps(schema), + some_instance=json.dumps(instance), + ), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_instance_continues_with_the_rest(self): + self.assertOutputs( + files=dict( + some_schema='{"minimum": 30}', + first_instance="not valid JSON!", + second_instance="12", + ), + argv=[ + "-i", "first_instance", + "-i", "second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + Failed to parse 'first_instance': {} + 12: 12 is less than the minimum of 30 + """.format(_message_for("not valid JSON!")), + ) + + def test_custom_error_format_applies_to_schema_errors(self): + instance, schema = 13, {"type": 12, "minimum": 30} + + with self.assertRaises(SchemaError): + validate(schema=schema, instance=instance) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + + argv=[ + "--error-format", ":{error.message}._-_.{error.instance}:", + "some_schema", + ], + + exit_code=1, + stderr=":12 is not valid under any of the given schemas._-_.12:", + ) + + def test_instance_is_invalid_JSON(self): + instance = "not valid JSON!" + + self.assertOutputs( + files=dict(some_schema="{}", some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="""\ + Failed to parse 'some_instance': {} + """.format(_message_for(instance)), + ) + + def test_instance_is_invalid_JSON_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict( + some_schema="{}", + some_instance="not valid JSON!", + ), + + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_instance)===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_schema", stderr) + + def test_instance_is_invalid_JSON_on_stdin(self): + instance = "not valid JSON!" + + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO(instance), + + argv=["some_schema"], + + exit_code=1, + stderr="""\ + Failed to parse : {} + """.format(_message_for(instance)), + ) + + def test_instance_is_invalid_JSON_on_stdin_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict(some_schema="{}"), + stdin=StringIO("not valid JSON!"), + + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "()===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_schema", stderr) + + def test_schema_is_invalid_JSON(self): + schema = "not valid JSON!" 
+ + self.assertOutputs( + files=dict(some_schema=schema), + + argv=["some_schema"], + + exit_code=1, + stderr="""\ + Failed to parse 'some_schema': {} + """.format(_message_for(schema)), + ) + + def test_schema_is_invalid_JSON_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict(some_schema="not valid JSON!"), + + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_schema)===\n\nTraceback (most recent call last):\n", + stderr, + ) + + def test_schema_and_instance_are_both_invalid_JSON(self): + """ + Only the schema error is reported, as we abort immediately. + """ + schema, instance = "not valid JSON!", "also not valid JSON!" + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + + argv=["some_schema"], + + exit_code=1, + stderr="""\ + Failed to parse 'some_schema': {} + """.format(_message_for(schema)), + ) + + def test_schema_and_instance_are_both_invalid_JSON_pretty_output(self): + """ + Only the schema error is reported, as we abort immediately. + """ + stdout, stderr = self.run_cli( + files=dict( + some_schema="not valid JSON!", + some_instance="also not valid JSON!", + ), + + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_schema)===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_instance", stderr) + + def test_instance_does_not_exist(self): + self.assertOutputs( + files=dict(some_schema="{}"), + argv=["-i", "nonexisting_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 'nonexisting_instance' does not exist. + """, + ) + + def test_instance_does_not_exist_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}"), + argv=[ + "--output", "pretty", + "-i", "nonexisting_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_instance)=== + + 'nonexisting_instance' does not exist. + ----------------------------- + """, + ) + + def test_schema_does_not_exist(self): + self.assertOutputs( + argv=["nonexisting_schema"], + + exit_code=1, + stderr="'nonexisting_schema' does not exist.\n", + ) + + def test_schema_does_not_exist_pretty_output(self): + self.assertOutputs( + argv=["--output", "pretty", "nonexisting_schema"], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_schema)=== + + 'nonexisting_schema' does not exist. + ----------------------------- + """, + ) + + def test_neither_instance_nor_schema_exist(self): + self.assertOutputs( + argv=["-i", "nonexisting_instance", "nonexisting_schema"], + + exit_code=1, + stderr="'nonexisting_schema' does not exist.\n", + ) + + def test_neither_instance_nor_schema_exist_pretty_output(self): + self.assertOutputs( + argv=[ + "--output", "pretty", + "-i", "nonexisting_instance", + "nonexisting_schema", + ], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_schema)=== + + 'nonexisting_schema' does not exist. 
+ ----------------------------- + """, + ) + + def test_successful_validation(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + stdout="===[SUCCESS]===(some_instance)===\n", + stderr="", + ) + + def test_successful_validation_of_stdin(self): + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO("{}"), + argv=["some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_of_stdin_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO("{}"), + argv=["--output", "pretty", "some_schema"], + stdout="===[SUCCESS]===()===\n", + stderr="", + ) + + def test_successful_validation_of_just_the_schema(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_of_just_the_schema_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + stdout="===[SUCCESS]===(some_instance)===\n", + stderr="", + ) + + def test_successful_validation_via_explicit_base_uri(self): + ref_schema_file = tempfile.NamedTemporaryFile(delete=False) + self.addCleanup(os.remove, ref_schema_file.name) + + ref_path = Path(ref_schema_file.name) + ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}') + + schema = f'{{"$ref": "{ref_path.name}#definitions/num"}}' + + self.assertOutputs( + files=dict(some_schema=schema, some_instance="1"), + argv=[ + "-i", "some_instance", + "--base-uri", ref_path.parent.as_uri() + "/", + "some_schema", + ], + stdout="", + stderr="", + ) + + def test_unsuccessful_validation_via_explicit_base_uri(self): + ref_schema_file = tempfile.NamedTemporaryFile(delete=False) + self.addCleanup(os.remove, ref_schema_file.name) + + ref_path = Path(ref_schema_file.name) + ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}') + + schema = f'{{"$ref": "{ref_path.name}#definitions/num"}}' + + self.assertOutputs( + files=dict(some_schema=schema, some_instance='"1"'), + argv=[ + "-i", "some_instance", + "--base-uri", ref_path.parent.as_uri() + "/", + "some_schema", + ], + exit_code=1, + stdout="", + stderr="1: '1' is not of type 'integer'\n", + ) + + def test_nonexistent_file_with_explicit_base_uri(self): + schema = '{"$ref": "someNonexistentFile.json#definitions/num"}' + instance = "1" + + with self.assertRaises(RefResolutionError) as e: + self.assertOutputs( + files=dict( + some_schema=schema, + some_instance=instance, + ), + argv=[ + "-i", "some_instance", + "--base-uri", Path.cwd().as_uri(), + "some_schema", + ], + ) + error = str(e.exception) + self.assertIn(f"{os.sep}someNonexistentFile.json'", error) + + def test_invalid_exlicit_base_uri(self): + schema = '{"$ref": "foo.json#definitions/num"}' + instance = "1" + + with self.assertRaises(RefResolutionError) as e: + self.assertOutputs( + files=dict( + some_schema=schema, + some_instance=instance, + ), + argv=[ + "-i", "some_instance", + "--base-uri", "not@UR1", + "some_schema", + ], + ) + error = str(e.exception) + self.assertEqual( + error, "unknown url type: 'foo.json'", + ) + + def 
test_it_validates_using_the_latest_validator_when_unspecified(self): + # There isn't a better way now I can think of to ensure that the + # latest version was used, given that the call to validator_for + # is hidden inside the CLI, so guard that that's the case, and + # this test will have to be updated when versions change until + # we can think of a better way to ensure this behavior. + self.assertIs(Draft202012Validator, _LATEST_VERSION) + + self.assertOutputs( + files=dict(some_schema='{"const": "check"}', some_instance='"a"'), + argv=["-i", "some_instance", "some_schema"], + exit_code=1, + stdout="", + stderr="a: 'check' was expected\n", + ) + + def test_it_validates_using_draft7_when_specified(self): + """ + Specifically, `const` validation applies for Draft 7. + """ + schema = """ + { + "$schema": "http://json-schema.org/draft-07/schema#", + "const": "check" + } + """ + instance = '"foo"' + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + exit_code=1, + stdout="", + stderr="foo: 'check' was expected\n", + ) + + def test_it_validates_using_draft4_when_specified(self): + """ + Specifically, `const` validation *does not* apply for Draft 4. + """ + schema = """ + { + "$schema": "http://json-schema.org/draft-04/schema#", + "const": "check" + } + """ + instance = '"foo"' + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + class TestParser(TestCase): FakeValidator = fake_validator() - instance_file = "foo.json" - schema_file = "schema.json" - - def setUp(self): - cli.open = self.fake_open - self.addCleanup(delattr, cli, "open") - - def fake_open(self, path): - if path == self.instance_file: - contents = "" - elif path == self.schema_file: - contents = {} - else: # pragma: no cover - self.fail("What is {!r}".format(path)) - return NativeIO(json.dumps(contents)) def test_find_validator_by_fully_qualified_object_name(self): arguments = cli.parse_args( [ "--validator", "jsonschema.tests.test_cli.TestParser.FakeValidator", - "--instance", self.instance_file, - self.schema_file, - ] + "--instance", "mem://some/instance", + "mem://some/schema", + ], ) self.assertIs(arguments["validator"], self.FakeValidator) @@ -60,87 +849,47 @@ def test_find_validator_in_jsonschema(self): arguments = cli.parse_args( [ "--validator", "Draft4Validator", - "--instance", self.instance_file, - self.schema_file, - ] + "--instance", "mem://some/instance", + "mem://some/schema", + ], ) self.assertIs(arguments["validator"], Draft4Validator) + def cli_output_for(self, *argv): + stdout, stderr = StringIO(), StringIO() + with redirect_stdout(stdout), redirect_stderr(stderr): + with self.assertRaises(SystemExit): + cli.parse_args(argv) + return stdout.getvalue(), stderr.getvalue() -class TestCLI(TestCase): - def test_draft3_schema_draft4_validator(self): - stdout, stderr = NativeIO(), NativeIO() - with self.assertRaises(SchemaError): - cli.run( - { - "validator": Draft4Validator, - "schema": { - "anyOf": [ - {"minimum": 20}, - {"type": "string"}, - {"required": True}, - ], - }, - "instances": [1], - "error_format": "{error.message}", - }, - stdout=stdout, - stderr=stderr, - ) + def test_unknown_output(self): + stdout, stderr = self.cli_output_for( + "--output", "foo", + "mem://some/schema", + ) + self.assertIn("invalid choice: 'foo'", stderr) + self.assertFalse(stdout) - def test_successful_validation(self): - stdout, stderr = NativeIO(), 
NativeIO() - exit_code = cli.run( - { - "validator": fake_validator(), - "schema": {}, - "instances": [1], - "error_format": "{error.message}", - }, - stdout=stdout, - stderr=stderr, - ) - self.assertFalse(stdout.getvalue()) - self.assertFalse(stderr.getvalue()) - self.assertEqual(exit_code, 0) - - def test_unsuccessful_validation(self): - error = ValidationError("I am an error!", instance=1) - stdout, stderr = NativeIO(), NativeIO() - exit_code = cli.run( - { - "validator": fake_validator([error]), - "schema": {}, - "instances": [1], - "error_format": "{error.instance} - {error.message}", - }, - stdout=stdout, - stderr=stderr, - ) - self.assertFalse(stdout.getvalue()) - self.assertEqual(stderr.getvalue(), "1 - I am an error!") - self.assertEqual(exit_code, 1) - - def test_unsuccessful_validation_multiple_instances(self): - first_errors = [ - ValidationError("9", instance=1), - ValidationError("8", instance=1), - ] - second_errors = [ValidationError("7", instance=2)] - stdout, stderr = NativeIO(), NativeIO() - exit_code = cli.run( - { - "validator": fake_validator(first_errors, second_errors), - "schema": {}, - "instances": [1, 2], - "error_format": "{error.instance} - {error.message}\t", - }, - stdout=stdout, - stderr=stderr, - ) - self.assertFalse(stdout.getvalue()) - self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t") - self.assertEqual(exit_code, 1) + def test_useless_error_format(self): + stdout, stderr = self.cli_output_for( + "--output", "pretty", + "--error-format", "foo", + "mem://some/schema", + ) + self.assertIn( + "--error-format can only be used with --output plain", + stderr, + ) + self.assertFalse(stdout) + + +class TestCLIIntegration(TestCase): + def test_license(self): + output = subprocess.check_output( + [sys.executable, "-m", "pip", "show", "jsonschema"], + stderr=subprocess.STDOUT, + ) + self.assertIn(b"License: MIT", output) def test_version(self): version = subprocess.check_output( @@ -148,4 +897,15 @@ def test_version(self): stderr=subprocess.STDOUT, ) version = version.decode("utf-8").strip() - self.assertEqual(version, __version__) + self.assertEqual(version, metadata.version("jsonschema")) + + def test_no_arguments_shows_usage_notes(self): + output = subprocess.check_output( + [sys.executable, "-m", "jsonschema"], + stderr=subprocess.STDOUT, + ) + output_for_help = subprocess.check_output( + [sys.executable, "-m", "jsonschema", "--help"], + stderr=subprocess.STDOUT, + ) + self.assertEqual(output, output_for_help) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_deprecations.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_deprecations.py new file mode 100644 index 0000000..58fd050 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_deprecations.py @@ -0,0 +1,123 @@ +from unittest import TestCase + +from jsonschema import validators + + +class TestDeprecations(TestCase): + def test_version(self): + """ + As of v4.0.0, __version__ is deprecated in favor of importlib.metadata. + """ + + with self.assertWarns(DeprecationWarning) as w: + from jsonschema import __version__ # noqa + + self.assertEqual(w.filename, __file__) + self.assertTrue( + str(w.warning).startswith( + "Accessing jsonschema.__version__ is deprecated", + ), + ) + + def test_validators_ErrorTree(self): + """ + As of v4.0.0, importing ErrorTree from jsonschema.validators is + deprecated in favor of doing so from jsonschema.exceptions. 
+ """ + + with self.assertWarns(DeprecationWarning) as w: + from jsonschema.validators import ErrorTree # noqa + + self.assertEqual(w.filename, __file__) + self.assertTrue( + str(w.warning).startswith( + "Importing ErrorTree from jsonschema.validators is deprecated", + ), + ) + + def test_validators_validators(self): + """ + As of v4.0.0, accessing jsonschema.validators.validators is + deprecated. + """ + + with self.assertWarns(DeprecationWarning) as w: + value = validators.validators + self.assertEqual(value, validators._VALIDATORS) + + self.assertEqual(w.filename, __file__) + self.assertTrue( + str(w.warning).startswith( + "Accessing jsonschema.validators.validators is deprecated", + ), + ) + + def test_validators_meta_schemas(self): + """ + As of v4.0.0, accessing jsonschema.validators.meta_schemas is + deprecated. + """ + + with self.assertWarns(DeprecationWarning) as w: + value = validators.meta_schemas + self.assertEqual(value, validators._META_SCHEMAS) + + self.assertEqual(w.filename, __file__) + self.assertTrue( + str(w.warning).startswith( + "Accessing jsonschema.validators.meta_schemas is deprecated", + ), + ) + + def test_RefResolver_in_scope(self): + """ + As of v4.0.0, RefResolver.in_scope is deprecated. + """ + + resolver = validators.RefResolver.from_schema({}) + with self.assertWarns(DeprecationWarning) as w: + with resolver.in_scope("foo"): + pass + + self.assertEqual(w.filename, __file__) + self.assertTrue( + str(w.warning).startswith( + "jsonschema.RefResolver.in_scope is deprecated ", + ), + ) + + def test_Validator_is_valid_two_arguments(self): + """ + As of v4.0.0, calling is_valid with two arguments (to provide a + different schema) is deprecated. + """ + + validator = validators.Draft7Validator({}) + with self.assertWarns(DeprecationWarning) as w: + result = validator.is_valid("foo", {"type": "number"}) + + self.assertFalse(result) + self.assertEqual(w.filename, __file__) + self.assertTrue( + str(w.warning).startswith( + "Passing a schema to Validator.is_valid is deprecated ", + ), + ) + + def test_Validator_iter_errors_two_arguments(self): + """ + As of v4.0.0, calling iter_errors with two arguments (to provide a + different schema) is deprecated. 
+ """ + + validator = validators.Draft7Validator({}) + with self.assertWarns(DeprecationWarning) as w: + error, = validator.iter_errors("foo", {"type": "number"}) + + self.assertEqual(error.validator, "type") + self.assertEqual(w.filename, __file__) + self.assertTrue( + str(w.warning).startswith( + "Passing a schema to Validator.iter_errors is deprecated ", + ), + ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_exceptions.py old mode 100755 new mode 100644 index eae00d7..153b72e --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_exceptions.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_exceptions.py @@ -2,7 +2,6 @@ import textwrap from jsonschema import Draft4Validator, exceptions -from jsonschema.compat import PY3 class TestBestMatch(TestCase): @@ -42,7 +41,7 @@ def test_oneOf_and_anyOf_are_weak_matches(self): "minProperties": 2, "anyOf": [{"type": "string"}, {"type": "number"}], "oneOf": [{"type": "string"}, {"type": "number"}], - } + }, ) best = self.best_match(validator.iter_errors({})) self.assertEqual(best.validator, "minProperties") @@ -285,22 +284,36 @@ def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self): tree = exceptions.ErrorTree([error]) self.assertIsInstance(tree["foo"], exceptions.ErrorTree) + def test_repr(self): + e1, e2 = ( + exceptions.ValidationError( + "1", + validator="foo", + path=["bar", "bar2"], + instance="i1"), + exceptions.ValidationError( + "2", + validator="quux", + path=["foobar", 2], + instance="i2"), + ) + tree = exceptions.ErrorTree([e1, e2]) + self.assertEqual(repr(tree), "") + class TestErrorInitReprStr(TestCase): def make_error(self, **kwargs): defaults = dict( - message=u"hello", - validator=u"type", - validator_value=u"string", + message="hello", + validator="type", + validator_value="string", instance=5, - schema={u"type": u"string"}, + schema={"type": "string"}, ) defaults.update(kwargs) return exceptions.ValidationError(**defaults) def assertShows(self, expected, **kwargs): - if PY3: # pragma: no cover - expected = expected.replace("u'", "'") expected = textwrap.dedent(expected).rstrip("\n") error = self.make_error(**kwargs) @@ -315,7 +328,7 @@ def test_it_calls_super_and_sets_args(self): def test_repr(self): self.assertEqual( repr(exceptions.ValidationError(message="Hello!")), - "" % "Hello!", + "", ) def test_unset_error(self): @@ -338,8 +351,8 @@ def test_unset_error(self): def test_empty_paths(self): self.assertShows( """ - Failed validating u'type' in schema: - {u'type': u'string'} + Failed validating 'type' in schema: + {'type': 'string'} On instance: 5 @@ -351,8 +364,8 @@ def test_empty_paths(self): def test_one_item_paths(self): self.assertShows( """ - Failed validating u'type' in schema: - {u'type': u'string'} + Failed validating 'type' in schema: + {'type': 'string'} On instance[0]: 5 @@ -364,20 +377,20 @@ def test_one_item_paths(self): def test_multiple_item_paths(self): self.assertShows( """ - Failed validating u'type' in schema[u'items'][0]: - {u'type': u'string'} + Failed validating 'type' in schema['items'][0]: + {'type': 'string'} - On instance[0][u'a']: + On instance[0]['a']: 5 """, - path=[0, u"a"], - schema_path=[u"items", 0, 1], + path=[0, "a"], + schema_path=["items", 0, 1], ) def test_uses_pprint(self): self.assertShows( """ - Failed validating u'maxLength' in schema: + 
+            Failed validating 'maxLength' in schema:
                 {0: 0,
                  1: 1,
                  2: 2,
@@ -428,7 +441,7 @@ def test_uses_pprint(self):
             """,
             instance=list(range(25)),
             schema=dict(zip(range(20), range(20))),
-            validator=u"maxLength",
+            validator="maxLength",
         )
 
     def test_str_works_with_instances_having_overriden_eq_operator(self):
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_format.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_format.py
old mode 100755
new mode 100644
index 254985f..1846cb2
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_format.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_format.py
@@ -4,10 +4,9 @@
 
 from unittest import TestCase
 
-from jsonschema import FormatError, ValidationError, FormatChecker
+from jsonschema import FormatChecker, FormatError, ValidationError
 from jsonschema.validators import Draft4Validator
 
-
 BOOM = ValueError("Boom!")
 BANG = ZeroDivisionError("Bang!")
 
@@ -41,7 +40,7 @@ def test_it_can_register_checkers(self):
         checker.checks("boom")(boom)
         self.assertEqual(
             checker.checkers,
-            dict(FormatChecker.checkers, boom=(boom, ()))
+            dict(FormatChecker.checkers, boom=(boom, ())),
         )
 
     def test_it_catches_registered_errors(self):
@@ -80,10 +79,29 @@ def test_format_checkers_come_with_defaults(self):
 
     def test_repr(self):
         checker = FormatChecker(formats=())
-        checker.checks("foo")(lambda thing: True)
-        checker.checks("bar")(lambda thing: True)
-        checker.checks("baz")(lambda thing: True)
+        checker.checks("foo")(lambda thing: True)  # pragma: no cover
+        checker.checks("bar")(lambda thing: True)  # pragma: no cover
+        checker.checks("baz")(lambda thing: True)  # pragma: no cover
         self.assertEqual(
             repr(checker),
             "<FormatChecker checkers=['bar', 'baz', 'foo']>",
         )
+
+    def test_duration_format(self):
+        try:
+            from jsonschema._format import is_duration  # noqa: F401
+        except ImportError:  # pragma: no cover
+            pass
+        else:
+            checker = FormatChecker()
+            self.assertTrue(checker.conforms(1, "duration"))
+            self.assertTrue(checker.conforms("P4Y", "duration"))
+            self.assertFalse(checker.conforms("test", "duration"))
+
+    def test_uuid_format(self):
+        checker = FormatChecker()
+        self.assertTrue(checker.conforms(1, "uuid"))
+        self.assertTrue(
+            checker.conforms("6e6659ec-4503-4428-9f03-2e2ea4d6c278", "uuid"),
+        )
+        self.assertFalse(checker.conforms("test", "uuid"))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py
old mode 100755
new mode 100644
index ebccf29..c26da0a
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_jsonschema_test_suite.py
@@ -7,28 +7,31 @@
 """
 
 import sys
-import warnings
 
 from jsonschema import (
     Draft3Validator,
     Draft4Validator,
     Draft6Validator,
     Draft7Validator,
+    Draft201909Validator,
+    Draft202012Validator,
     draft3_format_checker,
     draft4_format_checker,
     draft6_format_checker,
     draft7_format_checker,
+    draft201909_format_checker,
+    draft202012_format_checker,
 )
 from jsonschema.tests._helpers import bug
 from jsonschema.tests._suite import Suite
-from jsonschema.validators import _DEPRECATED_DEFAULT_TYPES, create
-
 
 SUITE = Suite()
 DRAFT3 = SUITE.version(name="draft3")
 DRAFT4 = SUITE.version(name="draft4")
 DRAFT6 = SUITE.version(name="draft6")
 DRAFT7 = SUITE.version(name="draft7")
+DRAFT201909 = SUITE.version(name="draft2019-09")
+DRAFT202012 = SUITE.version(name="draft2020-12")
 
 
 def skip(message, **kwargs):
@@ -39,16 +42,38 @@ def skipper(test):
 
 
 def missing_format(checker):
-    def missing_format(test):
+    def missing_format(test):  # pragma: no cover
         schema = test.schema
-        if schema is True or schema is False or "format" not in schema:
+        if (
+            schema is True
+            or schema is False
+            or "format" not in schema
+            or schema["format"] in checker.checkers
+            or test.valid
+        ):
             return
 
-        if schema["format"] not in checker.checkers:
-            return "Format checker {0!r} not found.".format(schema["format"])
+        return "Format checker {0!r} not found.".format(schema["format"])
     return missing_format
 
 
+def complex_email_validation(test):
+    if test.subject != "email":
+        return
+
+    message = "Complex email validation is (intentionally) unsupported."
+    return skip(
+        message=message,
+        description="dot after local part is not valid",
+    )(test) or skip(
+        message=message,
+        description="dot before local part is not valid",
+    )(test) or skip(
+        message=message,
+        description="two subsequent dots inside local part are not valid",
+    )(test)
+
+
 is_narrow_build = sys.maxunicode == 2 ** 16 - 1
 if is_narrow_build:  # pragma: no cover
     message = "Not running surrogate Unicode case, this Python is narrow."
@@ -56,7 +81,9 @@ def missing_format(test):
     def narrow_unicode_build(test):  # pragma: no cover
         return skip(
             message=message,
-            description="one supplementary Unicode code point is not long enough",
+            description=(
+                "one supplementary Unicode code point is not long enough"
+            ),
        )(test) or skip(
             message=message,
             description="two supplementary Unicode code points is long enough",
@@ -66,20 +93,83 @@ def narrow_unicode_build(test):  # pragma: no cover
         return
 
 
+if sys.version_info < (3, 9):  # pragma: no cover
+    message = "Rejecting leading zeros is 3.9+"
+    allowed_leading_zeros = skip(
+        message=message,
+        subject="ipv4",
+        description=(
+            "leading zeroes should be rejected, as they are treated as octals"
+        ),
+    )
+else:
+    def allowed_leading_zeros(test):  # pragma: no cover
+        return
+
+
+def leap_second(test):
+    message = "Leap seconds are unsupported."
+    return skip(
+        message=message,
+        subject="time",
+        description="a valid time string with leap second",
+    )(test) or skip(
+        message=message,
+        subject="time",
+        description="a valid time string with leap second, Zulu",
+    )(test) or skip(
+        message=message,
+        subject="time",
+        description="a valid time string with leap second with offset",
+    )(test) or skip(
+        message=message,
+        subject="time",
+        description="valid leap second, positive time-offset",
+    )(test) or skip(
+        message=message,
+        subject="time",
+        description="valid leap second, negative time-offset",
+    )(test) or skip(
+        message=message,
+        subject="time",
+        description="valid leap second, large positive time-offset",
+    )(test) or skip(
+        message=message,
+        subject="time",
+        description="valid leap second, large negative time-offset",
+    )(test) or skip(
+        message=message,
+        subject="time",
+        description="valid leap second, zero time-offset",
+    )(test) or skip(
+        message=message,
+        subject="date-time",
+        description="a valid date-time with a leap second, UTC",
+    )(test) or skip(
+        message=message,
+        subject="date-time",
+        description="a valid date-time with a leap second, with minus offset",
+    )(test)
+
+
 TestDraft3 = DRAFT3.to_unittest_testcase(
     DRAFT3.tests(),
+    DRAFT3.format_tests(),
     DRAFT3.optional_tests_of(name="bignum"),
-    DRAFT3.optional_tests_of(name="format"),
+    DRAFT3.optional_tests_of(name="non-bmp-regex"),
     DRAFT3.optional_tests_of(name="zeroTerminatedFloats"),
     Validator=Draft3Validator,
     format_checker=draft3_format_checker,
     skip=lambda test: (
         narrow_unicode_build(test)
         or missing_format(draft3_format_checker)(test)
+        or complex_email_validation(test)
         or skip(
-            message="Upstream bug in strict_rfc3339",
-            subject="format",
-            description="case-insensitive T and Z",
+            message=bug(371),
+            subject="ref",
+            case_description=(
+                "$ref prevents a sibling id from changing the base uri"
+            ),
        )(test)
     ),
 )
@@ -87,14 +177,19 @@ def narrow_unicode_build(test):  # pragma: no cover
 
 TestDraft4 = DRAFT4.to_unittest_testcase(
     DRAFT4.tests(),
+    DRAFT4.format_tests(),
     DRAFT4.optional_tests_of(name="bignum"),
-    DRAFT4.optional_tests_of(name="format"),
+    DRAFT4.optional_tests_of(name="float-overflow"),
+    DRAFT4.optional_tests_of(name="non-bmp-regex"),
     DRAFT4.optional_tests_of(name="zeroTerminatedFloats"),
     Validator=Draft4Validator,
     format_checker=draft4_format_checker,
     skip=lambda test: (
         narrow_unicode_build(test)
+        or allowed_leading_zeros(test)
+        or leap_second(test)
         or missing_format(draft4_format_checker)(test)
+        or complex_email_validation(test)
         or skip(
             message=bug(),
             subject="ref",
@@ -103,21 +198,27 @@ def narrow_unicode_build(test):  # pragma: no cover
         or skip(
             message=bug(371),
             subject="ref",
-            case_description="Location-independent identifier",
+            case_description=(
+                "Location-independent identifier with "
+                "base URI change in subschema"
+            ),
         )(test)
         or skip(
             message=bug(371),
             subject="ref",
             case_description=(
-                "Location-independent identifier with absolute URI"
+                "$ref prevents a sibling id from changing the base uri"
             ),
         )(test)
         or skip(
             message=bug(371),
-            subject="ref",
-            case_description=(
-                "Location-independent identifier with base URI change in subschema"
-            ),
+            subject="id",
+            description="match $ref to id",
+        )(test)
+        or skip(
+            message=bug(371),
+            subject="id",
+            description="no match on enum or $ref to id",
         )(test)
         or skip(
             message=bug(),
@@ -125,9 +226,12 @@ def narrow_unicode_build(test):  # pragma: no cover
             case_description="base URI change - change folder in subschema",
         )(test)
         or skip(
-            message="Upstream bug in strict_rfc3339",
-            subject="format",
-            description="case-insensitive T and Z",
+            message=bug(),
+            subject="ref",
+            case_description=(
+                "id must be resolved against nearest parent, "
+                "not just immediate parent"
+            ),
         )(test)
     ),
 )
@@ -135,36 +239,24 @@ def narrow_unicode_build(test):  # pragma: no cover
 
 TestDraft6 = DRAFT6.to_unittest_testcase(
     DRAFT6.tests(),
+    DRAFT6.format_tests(),
     DRAFT6.optional_tests_of(name="bignum"),
-    DRAFT6.optional_tests_of(name="format"),
-    DRAFT6.optional_tests_of(name="zeroTerminatedFloats"),
+    DRAFT6.optional_tests_of(name="float-overflow"),
+    DRAFT6.optional_tests_of(name="non-bmp-regex"),
     Validator=Draft6Validator,
     format_checker=draft6_format_checker,
     skip=lambda test: (
         narrow_unicode_build(test)
+        or allowed_leading_zeros(test)
+        or leap_second(test)
         or missing_format(draft6_format_checker)(test)
-        or skip(
-            message=bug(),
-            subject="ref",
-            case_description="Recursive references between schemas",
-        )(test)
-        or skip(
-            message=bug(371),
-            subject="ref",
-            case_description="Location-independent identifier",
-        )(test)
-        or skip(
-            message=bug(371),
-            subject="ref",
-            case_description=(
-                "Location-independent identifier with absolute URI"
-            ),
-        )(test)
+        or complex_email_validation(test)
         or skip(
             message=bug(371),
             subject="ref",
             case_description=(
-                "Location-independent identifier with base URI change in subschema"
+                "Location-independent identifier with "
+                "base URI change in subschema"
             ),
         )(test)
         or skip(
@@ -173,9 +265,11 @@ def narrow_unicode_build(test):  # pragma: no cover
             case_description="base URI change - change folder in subschema",
         )(test)
         or skip(
-            message="Upstream bug in strict_rfc3339",
-            subject="format",
-            description="case-insensitive T and Z",
+            message=bug(371),
+            subject="ref",
+            case_description=(
+                "$ref prevents a sibling $id from changing the base uri"
+            ),
         )(test)
     ),
 )
@@ -186,49 +280,48 @@ def narrow_unicode_build(test):  # pragma: no cover
     DRAFT7.format_tests(),
     DRAFT7.optional_tests_of(name="bignum"),
     DRAFT7.optional_tests_of(name="content"),
-    DRAFT7.optional_tests_of(name="zeroTerminatedFloats"),
+    DRAFT7.optional_tests_of(name="float-overflow"),
+    DRAFT7.optional_tests_of(name="non-bmp-regex"),
     Validator=Draft7Validator,
     format_checker=draft7_format_checker,
     skip=lambda test: (
         narrow_unicode_build(test)
+        or allowed_leading_zeros(test)
+        or leap_second(test)
         or missing_format(draft7_format_checker)(test)
+        or complex_email_validation(test)
         or skip(
-            message=bug(),
+            message=bug(371),
             subject="ref",
-            case_description="Recursive references between schemas",
+            case_description=(
+                "Location-independent identifier with "
+                "base URI change in subschema"
+            ),
         )(test)
         or skip(
-            message=bug(371),
-            subject="ref",
-            case_description="Location-independent identifier",
+            message=bug(),
+            subject="refRemote",
+            case_description="base URI change - change folder in subschema",
         )(test)
         or skip(
             message=bug(371),
             subject="ref",
             case_description=(
-                "Location-independent identifier with absolute URI"
+                "$ref prevents a sibling $id from changing the base uri"
             ),
         )(test)
         or skip(
-            message=bug(371),
+            message=bug(),
             subject="ref",
             case_description=(
-                "Location-independent identifier with base URI change in subschema"
+                "$id must be resolved against nearest parent, "
+                "not just immediate parent"
             ),
         )(test)
-        or skip(
-            message=bug(),
-            subject="refRemote",
-            case_description="base URI change - change folder in subschema",
-        )(test)
-        or skip(
-            message="Upstream bug in strict_rfc3339",
-            subject="date-time",
-            description="case-insensitive T and Z",
-        )(test)
         or skip(
             message=bug(593),
             subject="content",
+            valid=False,
             case_description=(
                 "validation of string-encoded content based on media type"
             ),
@@ -236,11 +329,13 @@ def narrow_unicode_build(test):  # pragma: no cover
         or skip(
             message=bug(593),
             subject="content",
+            valid=False,
             case_description="validation of binary string-encoding",
         )(test)
         or skip(
             message=bug(593),
             subject="content",
+            valid=False,
             case_description=(
                 "validation of binary-encoded media type documents"
             ),
@@ -249,29 +344,104 @@ def narrow_unicode_build(test):  # pragma: no cover
 )
 
 
-with warnings.catch_warnings():
-    warnings.simplefilter("ignore", DeprecationWarning)
+TestDraft201909 = DRAFT201909.to_unittest_testcase(
+    DRAFT201909.tests(),
+    DRAFT201909.optional_tests_of(name="bignum"),
+    DRAFT201909.optional_tests_of(name="float-overflow"),
+    DRAFT201909.optional_tests_of(name="non-bmp-regex"),
+    DRAFT201909.optional_tests_of(name="refOfUnknownKeyword"),
+    Validator=Draft201909Validator,
+    skip=lambda test: (
+        skip(
+            message="unevaluatedItems is different in 2019-09 (needs work).",
+            subject="unevaluatedItems",
+        )(test)
+        or skip(
+            message="dynamicRef support isn't working yet.",
+            subject="recursiveRef",
+        )(test)
+        or skip(
+            message="These tests depends on dynamicRef working.",
+            subject="anchor",
+            case_description="same $anchor with different base uri",
+        )(test)
+        or skip(
+            message="Vocabulary support is not yet present.",
+            subject="vocabulary",
+        )(test)
+        or skip(
+            message=bug(),
+            subject="ref",
+            case_description=(
+                "$id must be resolved against nearest parent, "
+                "not just immediate parent"
+            ),
+        )(test)
+    ),
+)
 
-    TestDraft3LegacyTypeCheck = DRAFT3.to_unittest_testcase(
-        # Interestingly the any part couldn't really be done w/the old API.
-        (
-            (test for test in each if test.schema != {"type": "any"})
-            for each in DRAFT3.tests_of(name="type")
-        ),
-        name="TestDraft3LegacyTypeCheck",
-        Validator=create(
-            meta_schema=Draft3Validator.META_SCHEMA,
-            validators=Draft3Validator.VALIDATORS,
-            default_types=_DEPRECATED_DEFAULT_TYPES,
-        ),
-    )
-
-    TestDraft4LegacyTypeCheck = DRAFT4.to_unittest_testcase(
-        DRAFT4.tests_of(name="type"),
-        name="TestDraft4LegacyTypeCheck",
-        Validator=create(
-            meta_schema=Draft4Validator.META_SCHEMA,
-            validators=Draft4Validator.VALIDATORS,
-            default_types=_DEPRECATED_DEFAULT_TYPES,
-        ),
-    )
+
+TestDraft201909Format = DRAFT201909.to_unittest_testcase(
+    DRAFT201909.format_tests(),
+    Validator=Draft201909Validator,
+    format_checker=draft201909_format_checker,
+    skip=lambda test: (
+        complex_email_validation(test)
+        or allowed_leading_zeros(test)
+        or leap_second(test)
+        or missing_format(draft201909_format_checker)(test)
+        or complex_email_validation(test)
+    ),
+)
+
+
+TestDraft202012 = DRAFT202012.to_unittest_testcase(
+    DRAFT202012.tests(),
+    DRAFT202012.optional_tests_of(name="bignum"),
+    DRAFT202012.optional_tests_of(name="float-overflow"),
+    DRAFT202012.optional_tests_of(name="non-bmp-regex"),
+    DRAFT202012.optional_tests_of(name="refOfUnknownKeyword"),
+    Validator=Draft202012Validator,
+    skip=lambda test: (
+        narrow_unicode_build(test)
+        or skip(
+            message="dynamicRef support isn't working yet.",
+            subject="dynamicRef",
+        )(test)
+        or skip(
+            message="These tests depends on dynamicRef working.",
+            subject="defs",
+        )(test)
+        or skip(
+            message="These tests depends on dynamicRef working.",
+            subject="anchor",
+            case_description="same $anchor with different base uri",
+        )(test)
+        or skip(
+            message="Vocabulary support is not yet present.",
+            subject="vocabulary",
+        )(test)
+        or skip(
+            message=bug(),
+            subject="ref",
+            case_description=(
+                "$id must be resolved against nearest parent, "
+                "not just immediate parent"
+            ),
+        )(test)
+    ),
+)
+
+
+TestDraft202012Format = DRAFT202012.to_unittest_testcase(
+    DRAFT202012.format_tests(),
+    Validator=Draft202012Validator,
+    format_checker=draft202012_format_checker,
+    skip=lambda test: (
+        complex_email_validation(test)
+        or allowed_leading_zeros(test)
+        or leap_second(test)
+        or missing_format(draft202012_format_checker)(test)
+        or complex_email_validation(test)
+    ),
+)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_types.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_types.py
old mode 100755
new mode 100644
index 2280cc3..3fd1a70
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_types.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_types.py
@@ -1,15 +1,17 @@
 """
-Tests on the new type interface. The actual correctness of the type checking
-is handled in test_jsonschema_test_suite; these tests check that TypeChecker
-functions correctly and can facilitate extensions to type checking
+Tests for the `TypeChecker`-based type interface.
+
+The actual correctness of the type checking is handled in
+`test_jsonschema_test_suite`; these tests check that TypeChecker
+functions correctly at a more granular level.
""" from collections import namedtuple from unittest import TestCase from jsonschema import ValidationError, _validators from jsonschema._types import TypeChecker -from jsonschema.exceptions import UndefinedTypeCheck -from jsonschema.validators import Draft4Validator, extend +from jsonschema.exceptions import UndefinedTypeCheck, UnknownType +from jsonschema.validators import Draft202012Validator, extend def equals_2(checker, instance): @@ -21,23 +23,11 @@ def is_namedtuple(instance): def is_object_or_named_tuple(checker, instance): - if Draft4Validator.TYPE_CHECKER.is_type(instance, "object"): + if Draft202012Validator.TYPE_CHECKER.is_type(instance, "object"): return True return is_namedtuple(instance) -def coerce_named_tuple(fn): - def coerced(validator, value, instance, schema): - if is_namedtuple(instance): - instance = instance._asdict() - return fn(validator, value, instance, schema) - return coerced - - -required = coerce_named_tuple(_validators.required) -properties = coerce_named_tuple(_validators.properties) - - class TestTypeChecker(TestCase): def test_is_type(self): checker = TypeChecker({"two": equals_2}) @@ -50,9 +40,16 @@ def test_is_type(self): ) def test_is_unknown_type(self): - with self.assertRaises(UndefinedTypeCheck) as context: + with self.assertRaises(UndefinedTypeCheck) as e: TypeChecker().is_type(4, "foobar") - self.assertIn("foobar", str(context.exception)) + self.assertIn( + "'foobar' is unknown to this type checker", + str(e.exception), + ) + self.assertTrue( + e.exception.__suppress_context__, + msg="Expected the internal KeyError to be hidden.", + ) def test_checks_can_be_added_at_init(self): checker = TypeChecker({"two": equals_2}) @@ -121,8 +118,8 @@ def int_or_str_int(checker, instance): return True CustomValidator = extend( - Draft4Validator, - type_checker=Draft4Validator.TYPE_CHECKER.redefine( + Draft202012Validator, + type_checker=Draft202012Validator.TYPE_CHECKER.redefine( "integer", int_or_str_int, ), ) @@ -134,16 +131,22 @@ def int_or_str_int(checker, instance): with self.assertRaises(ValidationError): validator.validate(4.4) + with self.assertRaises(ValidationError): + validator.validate("foo") + def test_object_can_be_extended(self): schema = {"type": "object"} Point = namedtuple("Point", ["x", "y"]) - type_checker = Draft4Validator.TYPE_CHECKER.redefine( - u"object", is_object_or_named_tuple, + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, ) - CustomValidator = extend(Draft4Validator, type_checker=type_checker) + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + ) validator = CustomValidator(schema) validator.validate(Point(x=4, y=5)) @@ -151,11 +154,14 @@ def test_object_can_be_extended(self): def test_object_extensions_require_custom_validators(self): schema = {"type": "object", "required": ["x"]} - type_checker = Draft4Validator.TYPE_CHECKER.redefine( - u"object", is_object_or_named_tuple, + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, ) - CustomValidator = extend(Draft4Validator, type_checker=type_checker) + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + ) validator = CustomValidator(schema) Point = namedtuple("Point", ["x", "y"]) @@ -170,12 +176,22 @@ def test_object_extensions_can_handle_custom_validators(self): "properties": {"x": {"type": "integer"}}, } - type_checker = Draft4Validator.TYPE_CHECKER.redefine( - u"object", is_object_or_named_tuple, + type_checker = 
+        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
+            "object", is_object_or_named_tuple,
         )
 
+        def coerce_named_tuple(fn):
+            def coerced(validator, value, instance, schema):
+                if is_namedtuple(instance):
+                    instance = instance._asdict()
+                return fn(validator, value, instance, schema)
+            return coerced
+
+        required = coerce_named_tuple(_validators.required)
+        properties = coerce_named_tuple(_validators.properties)
+
         CustomValidator = extend(
-            Draft4Validator,
+            Draft202012Validator,
             type_checker=type_checker,
             validators={"required": required, "properties": properties},
         )
@@ -188,3 +204,14 @@ def test_object_extensions_can_handle_custom_validators(self):
 
         with self.assertRaises(ValidationError):
             validator.validate(Point(x="not an integer", y=5))
+
+        # As well as still handle objects.
+        validator.validate({"x": 4, "y": 5})
+
+        with self.assertRaises(ValidationError):
+            validator.validate({"x": "not an integer", "y": 5})
+
+    def test_unknown_type(self):
+        with self.assertRaises(UnknownType) as e:
+            Draft202012Validator({}).is_type(12, "some unknown type")
+        self.assertIn("'some unknown type'", str(e.exception))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_utils.py
new file mode 100644
index 0000000..4e542b9
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_utils.py
@@ -0,0 +1,124 @@
+from unittest import TestCase
+
+from jsonschema._utils import equal
+
+
+class TestEqual(TestCase):
+    def test_none(self):
+        self.assertTrue(equal(None, None))
+
+
+class TestDictEqual(TestCase):
+    def test_equal_dictionaries(self):
+        dict_1 = {"a": "b", "c": "d"}
+        dict_2 = {"c": "d", "a": "b"}
+        self.assertTrue(equal(dict_1, dict_2))
+
+    def test_missing_key(self):
+        dict_1 = {"a": "b", "c": "d"}
+        dict_2 = {"c": "d", "x": "b"}
+        self.assertFalse(equal(dict_1, dict_2))
+
+    def test_additional_key(self):
+        dict_1 = {"a": "b", "c": "d"}
+        dict_2 = {"c": "d", "a": "b", "x": "x"}
+        self.assertFalse(equal(dict_1, dict_2))
+
+    def test_missing_value(self):
+        dict_1 = {"a": "b", "c": "d"}
+        dict_2 = {"c": "d", "a": "x"}
+        self.assertFalse(equal(dict_1, dict_2))
+
+    def test_empty_dictionaries(self):
+        dict_1 = {}
+        dict_2 = {}
+        self.assertTrue(equal(dict_1, dict_2))
+
+    def test_one_none(self):
+        dict_1 = None
+        dict_2 = {"a": "b", "c": "d"}
+        self.assertFalse(equal(dict_1, dict_2))
+
+    def test_same_item(self):
+        dict_1 = {"a": "b", "c": "d"}
+        self.assertTrue(equal(dict_1, dict_1))
+
+    def test_nested_equal(self):
+        dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"}
+        dict_2 = {"c": "d", "a": {"a": "b", "c": "d"}}
+        self.assertTrue(equal(dict_1, dict_2))
+
+    def test_nested_dict_unequal(self):
+        dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"}
+        dict_2 = {"c": "d", "a": {"a": "b", "c": "x"}}
+        self.assertFalse(equal(dict_1, dict_2))
+
+    def test_mixed_nested_equal(self):
+        dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"}
+        dict_2 = {"c": "d", "a": ["a", "b", "c", "d"]}
+        self.assertTrue(equal(dict_1, dict_2))
+
+    def test_nested_list_unequal(self):
+        dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"}
+        dict_2 = {"c": "d", "a": ["b", "c", "d", "a"]}
+        self.assertFalse(equal(dict_1, dict_2))
+
+
+class TestListEqual(TestCase):
+    def test_equal_lists(self):
+        list_1 = ["a", "b", "c"]
+        list_2 = ["a", "b", "c"]
+        self.assertTrue(equal(list_1, list_2))
+
+    def test_unsorted_lists(self):
+        list_1 = ["a", "b", "c"]
+        list_2 = ["b", "b", "a"]
+        self.assertFalse(equal(list_1, list_2))
+
+    def test_first_list_larger(self):
+        list_1 = ["a", "b", "c"]
+        list_2 = ["a", "b"]
+        self.assertFalse(equal(list_1, list_2))
+
+    def test_second_list_larger(self):
+        list_1 = ["a", "b"]
+        list_2 = ["a", "b", "c"]
+        self.assertFalse(equal(list_1, list_2))
+
+    def test_list_with_none_unequal(self):
+        list_1 = ["a", "b", None]
+        list_2 = ["a", "b", "c"]
+        self.assertFalse(equal(list_1, list_2))
+
+        list_1 = ["a", "b", None]
+        list_2 = [None, "b", "c"]
+        self.assertFalse(equal(list_1, list_2))
+
+    def test_list_with_none_equal(self):
+        list_1 = ["a", None, "c"]
+        list_2 = ["a", None, "c"]
+        self.assertTrue(equal(list_1, list_2))
+
+    def test_empty_list(self):
+        list_1 = []
+        list_2 = []
+        self.assertTrue(equal(list_1, list_2))
+
+    def test_one_none(self):
+        list_1 = None
+        list_2 = []
+        self.assertFalse(equal(list_1, list_2))
+
+    def test_same_list(self):
+        list_1 = ["a", "b", "c"]
+        self.assertTrue(equal(list_1, list_1))
+
+    def test_equal_nested_lists(self):
+        list_1 = ["a", ["b", "c"], "d"]
+        list_2 = ["a", ["b", "c"], "d"]
+        self.assertTrue(equal(list_1, list_2))
+
+    def test_unequal_nested_lists(self):
+        list_1 = ["a", ["b", "c"], "d"]
+        list_2 = ["a", [], "c"]
+        self.assertFalse(equal(list_1, list_2))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_validators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_validators.py
old mode 100755
new mode 100644
index 07be4f0..8b69a69
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_validators.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/tests/test_validators.py
@@ -1,37 +1,46 @@
-from collections import deque
+from __future__ import annotations
+
+from collections import deque, namedtuple
 from contextlib import contextmanager
 from decimal import Decimal
 from io import BytesIO
-from unittest import TestCase
+from unittest import TestCase, mock
+from urllib.request import pathname2url
 import json
 import os
 import sys
 import tempfile
 import unittest
+import warnings
 
-from twisted.trial.unittest import SynchronousTestCase
 import attr
 
-from jsonschema import FormatChecker, TypeChecker, exceptions, validators
-from jsonschema.compat import PY3, pathname2url
+from jsonschema import (
+    FormatChecker,
+    TypeChecker,
+    exceptions,
+    protocols,
+    validators,
+)
 from jsonschema.tests._helpers import bug
 
 
-def startswith(validator, startswith, instance, schema):
-    if not instance.startswith(startswith):
-        yield exceptions.ValidationError(u"Whoops!")
+def fail(validator, errors, instance, schema):
+    for each in errors:
+        each.setdefault("message", "You told me to fail!")
+        yield exceptions.ValidationError(**each)
 
 
-class TestCreateAndExtend(SynchronousTestCase):
+class TestCreateAndExtend(TestCase):
     def setUp(self):
         self.addCleanup(
             self.assertEqual,
-            validators.meta_schemas,
-            dict(validators.meta_schemas),
+            validators._META_SCHEMAS,
+            dict(validators._META_SCHEMAS),
         )
 
-        self.meta_schema = {u"$id": "some://meta/schema"}
-        self.validators = {u"startswith": startswith}
+        self.meta_schema = {"$id": "some://meta/schema"}
+        self.validators = {"fail": fail}
         self.type_checker = TypeChecker()
         self.Validator = validators.create(
             meta_schema=self.meta_schema,
@@ -53,66 +62,123 @@ def test_attrs(self):
         )
 
     def test_init(self):
-        schema = {u"startswith": u"foo"}
+        schema = {"fail": []}
         self.assertEqual(self.Validator(schema).schema, schema)
 
-    def test_iter_errors(self):
-        schema = {u"startswith": u"hel"}
-        iter_errors = self.Validator(schema).iter_errors
+    def test_iter_errors_successful(self):
+        schema = {"fail": []}
+        validator = self.Validator(schema)
 
-        errors = list(iter_errors(u"hello"))
+        errors = list(validator.iter_errors("hello"))
         self.assertEqual(errors, [])
 
+    def test_iter_errors_one_error(self):
+        schema = {"fail": [{"message": "Whoops!"}]}
+        validator = self.Validator(schema)
+
         expected_error = exceptions.ValidationError(
-            u"Whoops!",
-            instance=u"goodbye",
+            "Whoops!",
+            instance="goodbye",
             schema=schema,
-            validator=u"startswith",
-            validator_value=u"hel",
-            schema_path=deque([u"startswith"]),
+            validator="fail",
+            validator_value=[{"message": "Whoops!"}],
+            schema_path=deque(["fail"]),
         )
 
-        errors = list(iter_errors(u"goodbye"))
+        errors = list(validator.iter_errors("goodbye"))
         self.assertEqual(len(errors), 1)
         self.assertEqual(errors[0]._contents(), expected_error._contents())
 
+    def test_iter_errors_multiple_errors(self):
+        schema = {
+            "fail": [
+                {"message": "First"},
+                {"message": "Second!", "validator": "asdf"},
+                {"message": "Third"},
+            ],
+        }
+        validator = self.Validator(schema)
+
+        errors = list(validator.iter_errors("goodbye"))
+        self.assertEqual(len(errors), 3)
+
     def test_if_a_version_is_provided_it_is_registered(self):
         Validator = validators.create(
-            meta_schema={u"$id": "something"},
+            meta_schema={"$id": "something"},
             version="my version",
         )
-        self.addCleanup(validators.meta_schemas.pop, "something")
+        self.addCleanup(validators._META_SCHEMAS.pop, "something")
         self.assertEqual(Validator.__name__, "MyVersionValidator")
+        self.assertEqual(Validator.__qualname__, "MyVersionValidator")
+
+    def test_repr(self):
+        Validator = validators.create(
+            meta_schema={"$id": "something"},
+            version="my version",
+        )
+        self.addCleanup(validators._META_SCHEMAS.pop, "something")
+        self.assertEqual(
+            repr(Validator({})),
+            "MyVersionValidator(schema={}, format_checker=None)",
+        )
+
+    def test_long_repr(self):
+        Validator = validators.create(
+            meta_schema={"$id": "something"},
+            version="my version",
+        )
+        self.addCleanup(validators._META_SCHEMAS.pop, "something")
+        self.assertEqual(
+            repr(Validator({"a": list(range(1000))})), (
+                "MyVersionValidator(schema={'a': [0, 1, 2, 3, 4, 5, ...]}, "
+                "format_checker=None)"
+            ),
+        )
+
+    def test_repr_no_version(self):
+        Validator = validators.create(meta_schema={})
+        self.assertEqual(
+            repr(Validator({})),
+            "Validator(schema={}, format_checker=None)",
+        )
+
+    def test_dashes_are_stripped_from_validator_names(self):
+        Validator = validators.create(
+            meta_schema={"$id": "something"},
+            version="foo-bar",
+        )
+        self.addCleanup(validators._META_SCHEMAS.pop, "something")
+        self.assertEqual(Validator.__qualname__, "FooBarValidator")
 
     def test_if_a_version_is_not_provided_it_is_not_registered(self):
-        original = dict(validators.meta_schemas)
-        validators.create(meta_schema={u"id": "id"})
-        self.assertEqual(validators.meta_schemas, original)
+        original = dict(validators._META_SCHEMAS)
+        validators.create(meta_schema={"id": "id"})
+        self.assertEqual(validators._META_SCHEMAS, original)
 
     def test_validates_registers_meta_schema_id(self):
         meta_schema_key = "meta schema id"
-        my_meta_schema = {u"id": meta_schema_key}
+        my_meta_schema = {"id": meta_schema_key}
 
         validators.create(
             meta_schema=my_meta_schema,
             version="my version",
             id_of=lambda s: s.get("id", ""),
         )
-        self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
+        self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key)
 
-        self.assertIn(meta_schema_key, validators.meta_schemas)
+        self.assertIn(meta_schema_key, validators._META_SCHEMAS)
 
     def test_validates_registers_meta_schema_draft6_id(self):
         meta_schema_key = "meta schema $id"
-        my_meta_schema = {u"$id": meta_schema_key}
+        my_meta_schema = {"$id": meta_schema_key}
 
         validators.create(
             meta_schema=my_meta_schema,
             version="my version",
         )
-        self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
+        self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key)
 
-        self.assertIn(meta_schema_key, validators.meta_schemas)
+        self.assertIn(meta_schema_key, validators._META_SCHEMAS)
 
     def test_create_default_types(self):
         Validator = validators.create(meta_schema={}, validators=())
@@ -120,13 +186,13 @@ def test_create_default_types(self):
             all(
                 Validator({}).is_type(instance=instance, type=type)
                 for type, instance in [
-                    (u"array", []),
-                    (u"boolean", True),
-                    (u"integer", 12),
-                    (u"null", None),
-                    (u"number", 12.0),
-                    (u"object", {}),
-                    (u"string", u"foo"),
+                    ("array", []),
+                    ("boolean", True),
+                    ("integer", 12),
+                    ("null", None),
+                    ("number", 12.0),
+                    ("object", {}),
+                    ("string", "foo"),
                 ]
             ),
         )
@@ -137,7 +203,7 @@ def test_extend(self):
 
         Extended = validators.extend(
             self.Validator,
-            validators={u"new": new},
+            validators={"new": new},
         )
         self.assertEqual(
             (
@@ -158,11 +224,11 @@ def test_extend_idof(self):
         Extending a validator preserves its notion of schema IDs.
         """
         def id_of(schema):
-            return schema.get(u"__test__", self.Validator.ID_OF(schema))
+            return schema.get("__test__", self.Validator.ID_OF(schema))
 
         correct_id = "the://correct/id/"
         meta_schema = {
-            u"$id": "the://wrong/id/",
-            u"__test__": correct_id,
+            "$id": "the://wrong/id/",
+            "__test__": correct_id,
         }
         Original = validators.create(
             meta_schema=meta_schema,
@@ -176,289 +242,53 @@ def id_of(schema):
 
         self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id)
 
 
-class TestLegacyTypeChecking(SynchronousTestCase):
-    def test_create_default_types(self):
-        Validator = validators.create(meta_schema={}, validators=())
-        self.assertEqual(
-            set(Validator.DEFAULT_TYPES), {
-                u"array",
-                u"boolean",
-                u"integer",
-                u"null",
-                u"number",
-                u"object", u"string",
-            },
-        )
-        self.flushWarnings()
-
-    def test_extend(self):
-        Validator = validators.create(meta_schema={}, validators=())
-        original = dict(Validator.VALIDATORS)
-        new = object()
-
-        Extended = validators.extend(
-            Validator,
-            validators={u"new": new},
-        )
-        self.assertEqual(
-            (
-                Extended.VALIDATORS,
-                Extended.META_SCHEMA,
-                Extended.TYPE_CHECKER,
-                Validator.VALIDATORS,
-
-                Extended.DEFAULT_TYPES,
-                Extended({}).DEFAULT_TYPES,
-                self.flushWarnings()[0]["message"],
-            ), (
-                dict(original, new=new),
-                Validator.META_SCHEMA,
-                Validator.TYPE_CHECKER,
-                original,
-
-                Validator.DEFAULT_TYPES,
-                Validator.DEFAULT_TYPES,
-                self.flushWarnings()[0]["message"],
-            ),
-        )
-
-    def test_types_redefines_the_validators_type_checker(self):
-        schema = {"type": "string"}
-        self.assertFalse(validators.Draft7Validator(schema).is_valid(12))
-
-        validator = validators.Draft7Validator(
-            schema,
-            types={"string": (str, int)},
-        )
-        self.assertTrue(validator.is_valid(12))
-        self.flushWarnings()
-
-    def test_providing_default_types_warns(self):
-        self.assertWarns(
-            category=DeprecationWarning,
-            message=(
-                "The default_types argument is deprecated. "
-                "Use the type_checker argument instead."
-            ),
-            # https://tm.tl/9363 :'(
-            filename=sys.modules[self.assertWarns.__module__].__file__,
-
-            f=validators.create,
-            meta_schema={},
-            validators={},
-            default_types={"foo": object},
-        )
-
-    def test_cannot_ask_for_default_types_with_non_default_type_checker(self):
-        """
-        We raise an error when you ask a validator with non-default
-        type checker for its DEFAULT_TYPES.
-
-        The type checker argument is new, so no one but this library
-        itself should be trying to use it, and doing so while then
-        asking for DEFAULT_TYPES makes no sense (not to mention is
-        deprecated), since type checkers are not strictly about Python
-        type.
-        """
-        Validator = validators.create(
-            meta_schema={},
-            validators={},
-            type_checker=TypeChecker(),
-        )
-        with self.assertRaises(validators._DontDoThat) as e:
-            Validator.DEFAULT_TYPES
-
-        self.assertIn(
-            "DEFAULT_TYPES cannot be used on Validators using TypeCheckers",
-            str(e.exception),
-        )
-        with self.assertRaises(validators._DontDoThat):
-            Validator({}).DEFAULT_TYPES
-
-        self.assertFalse(self.flushWarnings())
-
-    def test_providing_explicit_type_checker_does_not_warn(self):
-        Validator = validators.create(
-            meta_schema={},
-            validators={},
-            type_checker=TypeChecker(),
-        )
-        self.assertFalse(self.flushWarnings())
-
-        Validator({})
-        self.assertFalse(self.flushWarnings())
-
-    def test_providing_neither_does_not_warn(self):
-        Validator = validators.create(meta_schema={}, validators={})
-        self.assertFalse(self.flushWarnings())
-
-        Validator({})
-        self.assertFalse(self.flushWarnings())
-
-    def test_providing_default_types_with_type_checker_errors(self):
-        with self.assertRaises(TypeError) as e:
-            validators.create(
-                meta_schema={},
-                validators={},
-                default_types={"foo": object},
-                type_checker=TypeChecker(),
-            )
-
-        self.assertIn(
-            "Do not specify default_types when providing a type checker",
-            str(e.exception),
-        )
-        self.assertFalse(self.flushWarnings())
-
-    def test_extending_a_legacy_validator_with_a_type_checker_errors(self):
-        Validator = validators.create(
-            meta_schema={},
-            validators={},
-            default_types={u"array": list}
-        )
-        with self.assertRaises(TypeError) as e:
-            validators.extend(
-                Validator,
-                validators={},
-                type_checker=TypeChecker(),
-            )
-
-        self.assertIn(
-            (
-                "Cannot extend a validator created with default_types "
-                "with a type_checker. Update the validator to use a "
-                "type_checker when created."
-            ),
-            str(e.exception),
-        )
-        self.flushWarnings()
-
-    def test_extending_a_legacy_validator_does_not_rewarn(self):
-        Validator = validators.create(meta_schema={}, default_types={})
-        self.assertTrue(self.flushWarnings())
-
-        validators.extend(Validator)
-        self.assertFalse(self.flushWarnings())
-
-    def test_accessing_default_types_warns(self):
-        Validator = validators.create(meta_schema={}, validators={})
-        self.assertFalse(self.flushWarnings())
-
-        self.assertWarns(
-            DeprecationWarning,
-            (
-                "The DEFAULT_TYPES attribute is deprecated. "
-                "See the type checker attached to this validator instead."
-            ),
-            # https://tm.tl/9363 :'(
-            sys.modules[self.assertWarns.__module__].__file__,
-
-            getattr,
-            Validator,
-            "DEFAULT_TYPES",
-        )
-
-    def test_accessing_default_types_on_the_instance_warns(self):
-        Validator = validators.create(meta_schema={}, validators={})
-        self.assertFalse(self.flushWarnings())
-
-        self.assertWarns(
-            DeprecationWarning,
-            (
-                "The DEFAULT_TYPES attribute is deprecated. "
-                "See the type checker attached to this validator instead."
-            ),
-            # https://tm.tl/9363 :'(
-            sys.modules[self.assertWarns.__module__].__file__,
-
-            getattr,
-            Validator({}),
-            "DEFAULT_TYPES",
-        )
-
-    def test_providing_types_to_init_warns(self):
-        Validator = validators.create(meta_schema={}, validators={})
-        self.assertFalse(self.flushWarnings())
-
-        self.assertWarns(
-            category=DeprecationWarning,
-            message=(
-                "The types argument is deprecated. "
-                "Provide a type_checker to jsonschema.validators.extend "
-                "instead."
-            ),
-            # https://tm.tl/9363 :'(
-            filename=sys.modules[self.assertWarns.__module__].__file__,
-
-            f=Validator,
-            schema={},
-            types={"bar": object},
-        )
-
-
-class TestIterErrors(TestCase):
-    def setUp(self):
-        self.validator = validators.Draft3Validator({})
-
-    def test_iter_errors(self):
-        instance = [1, 2]
-        schema = {
-            u"disallow": u"array",
-            u"enum": [["a", "b", "c"], ["d", "e", "f"]],
-            u"minItems": 3,
-        }
-
-        got = (e.message for e in self.validator.iter_errors(instance, schema))
-        expected = [
-            "%r is disallowed for [1, 2]" % (schema["disallow"],),
-            "[1, 2] is too short",
-            "[1, 2] is not one of %r" % (schema["enum"],),
-        ]
-        self.assertEqual(sorted(got), sorted(expected))
-
-    def test_iter_errors_multiple_failures_one_validator(self):
-        instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"}
-        schema = {
-            u"properties": {
-                "foo": {u"type": "string"},
-                "bar": {u"minItems": 2},
-                "baz": {u"maximum": 10, u"enum": [2, 4, 6, 8]},
-            },
-        }
-
-        errors = list(self.validator.iter_errors(instance, schema))
-        self.assertEqual(len(errors), 4)
-
-
 class TestValidationErrorMessages(TestCase):
     def message_for(self, instance, schema, *args, **kwargs):
-        kwargs.setdefault("cls", validators.Draft3Validator)
-        with self.assertRaises(exceptions.ValidationError) as e:
-            validators.validate(instance, schema, *args, **kwargs)
-        return e.exception.message
+        cls = kwargs.pop("cls", validators._LATEST_VERSION)
+        cls.check_schema(schema)
+        validator = cls(schema, *args, **kwargs)
+        errors = list(validator.iter_errors(instance))
+        self.assertTrue(errors, msg=f"No errors were raised for {instance!r}")
+        self.assertEqual(
+            len(errors),
+            1,
+            msg=f"Expected exactly one error, found {errors!r}",
+        )
+        return errors[0].message
 
     def test_single_type_failure(self):
-        message = self.message_for(instance=1, schema={u"type": u"string"})
-        self.assertEqual(message, "1 is not of type %r" % u"string")
+        message = self.message_for(instance=1, schema={"type": "string"})
+        self.assertEqual(message, "1 is not of type 'string'")
 
     def test_single_type_list_failure(self):
-        message = self.message_for(instance=1, schema={u"type": [u"string"]})
-        self.assertEqual(message, "1 is not of type %r" % u"string")
+        message = self.message_for(instance=1, schema={"type": ["string"]})
+        self.assertEqual(message, "1 is not of type 'string'")
 
     def test_multiple_type_failure(self):
-        types = u"string", u"object"
-        message = self.message_for(instance=1, schema={u"type": list(types)})
-        self.assertEqual(message, "1 is not of type %r, %r" % types)
+        types = "string", "object"
+        message = self.message_for(instance=1, schema={"type": list(types)})
+        self.assertEqual(message, "1 is not of type 'string', 'object'")
 
     def test_object_without_title_type_failure(self):
-        type = {u"type": [{u"minimum": 3}]}
-        message = self.message_for(instance=1, schema={u"type": [type]})
-        self.assertEqual(message, "1 is less than the minimum of 3")
+        type = {"type": [{"minimum": 3}]}
+        message = self.message_for(
+            instance=1,
+            schema={"type": [type]},
+            cls=validators.Draft3Validator,
+        )
+        self.assertEqual(
+            message,
+            "1 is not of type {'type': [{'minimum': 3}]}",
+        )
 
     def test_object_with_named_type_failure(self):
-        schema = {u"type": [{u"name": "Foo", u"minimum": 3}]}
-        message = self.message_for(instance=1, schema=schema)
-        self.assertEqual(message, "1 is less than the minimum of 3")
+        schema = {"type": [{"name": "Foo", "minimum": 3}]}
+        message = self.message_for(
+            instance=1,
+            schema=schema,
+            cls=validators.Draft3Validator,
+        )
+        self.assertEqual(message, "1 is not of type 'Foo'")
 
     def test_minimum(self):
         message = self.message_for(instance=1, schema={"minimum": 2})
@@ -470,56 +300,58 @@ def test_maximum(self):
 
     def test_dependencies_single_element(self):
         depend, on = "bar", "foo"
-        schema = {u"dependencies": {depend: on}}
+        schema = {"dependencies": {depend: on}}
         message = self.message_for(
             instance={"bar": 2},
             schema=schema,
             cls=validators.Draft3Validator,
         )
-        self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+        self.assertEqual(message, "'foo' is a dependency of 'bar'")
 
     def test_dependencies_list_draft3(self):
         depend, on = "bar", "foo"
-        schema = {u"dependencies": {depend: [on]}}
+        schema = {"dependencies": {depend: [on]}}
         message = self.message_for(
             instance={"bar": 2},
             schema=schema,
             cls=validators.Draft3Validator,
         )
-        self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+        self.assertEqual(message, "'foo' is a dependency of 'bar'")
 
     def test_dependencies_list_draft7(self):
         depend, on = "bar", "foo"
-        schema = {u"dependencies": {depend: [on]}}
+        schema = {"dependencies": {depend: [on]}}
         message = self.message_for(
             instance={"bar": 2},
             schema=schema,
             cls=validators.Draft7Validator,
         )
-        self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+        self.assertEqual(message, "'foo' is a dependency of 'bar'")
 
     def test_additionalItems_single_failure(self):
         message = self.message_for(
             instance=[2],
-            schema={u"items": [], u"additionalItems": False},
+            schema={"items": [], "additionalItems": False},
+            cls=validators.Draft3Validator,
         )
         self.assertIn("(2 was unexpected)", message)
 
     def test_additionalItems_multiple_failures(self):
         message = self.message_for(
             instance=[1, 2, 3],
-            schema={u"items": [], u"additionalItems": False}
+            schema={"items": [], "additionalItems": False},
+            cls=validators.Draft3Validator,
         )
         self.assertIn("(1, 2, 3 were unexpected)", message)
 
     def test_additionalProperties_single_failure(self):
         additional = "foo"
-        schema = {u"additionalProperties": False}
+        schema = {"additionalProperties": False}
         message = self.message_for(instance={additional: 2}, schema=schema)
-        self.assertIn("(%r was unexpected)" % (additional,), message)
+        self.assertIn("('foo' was unexpected)", message)
 
     def test_additionalProperties_multiple_failures(self):
-        schema = {u"additionalProperties": False}
+        schema = {"additionalProperties": False}
         message = self.message_for(
             instance=dict.fromkeys(["foo", "bar"]),
             schema=schema,
@@ -530,31 +362,30 @@ def test_additionalProperties_multiple_failures(self):
         self.assertIn("were unexpected)", message)
 
     def test_const(self):
-        schema = {u"const": 12}
+        schema = {"const": 12}
         message = self.message_for(
             instance={"foo": "bar"},
             schema=schema,
-            cls=validators.Draft6Validator,
         )
         self.assertIn("12 was expected", message)
 
-    def test_contains(self):
-        schema = {u"contains": {u"const": 12}}
+    def test_contains_draft_6(self):
+        schema = {"contains": {"const": 12}}
         message = self.message_for(
             instance=[2, {}, []],
             schema=schema,
             cls=validators.Draft6Validator,
         )
-        self.assertIn(
-            "None of [2, {}, []] are valid under the given schema",
+        self.assertEqual(
             message,
+            "None of [2, {}, []] are valid under the given schema",
         )
 
     def test_invalid_format_default_message(self):
         checker = FormatChecker(formats=())
-        checker.checks(u"thing")(lambda value: False)
+        checker.checks("thing")(lambda value: False)
 
-        schema = {u"format": u"thing"}
+        schema = {"format": "thing"}
         message = self.message_for(
             instance="bla",
             schema=schema,
@@ -566,32 +397,32 @@ def test_invalid_format_default_message(self):
         self.assertIn("is not a", message)
 
     def test_additionalProperties_false_patternProperties(self):
-        schema = {u"type": u"object",
-                  u"additionalProperties": False,
-                  u"patternProperties": {
-                      u"^abc$": {u"type": u"string"},
-                      u"^def$": {u"type": u"string"},
+        schema = {"type": "object",
+                  "additionalProperties": False,
+                  "patternProperties": {
+                      "^abc$": {"type": "string"},
+                      "^def$": {"type": "string"},
                   }}
         message = self.message_for(
-            instance={u"zebra": 123},
+            instance={"zebra": 123},
             schema=schema,
             cls=validators.Draft4Validator,
         )
         self.assertEqual(
             message,
             "{} does not match any of the regexes: {}, {}".format(
-                repr(u"zebra"), repr(u"^abc$"), repr(u"^def$"),
+                repr("zebra"), repr("^abc$"), repr("^def$"),
             ),
         )
         message = self.message_for(
-            instance={u"zebra": 123, u"fish": 456},
+            instance={"zebra": 123, "fish": 456},
             schema=schema,
             cls=validators.Draft4Validator,
         )
         self.assertEqual(
             message,
             "{}, {} do not match any of the regexes: {}, {}".format(
-                repr(u"fish"), repr(u"zebra"), repr(u"^abc$"), repr(u"^def$")
+                repr("fish"), repr("zebra"), repr("^abc$"), repr("^def$"),
             ),
         )
 
@@ -599,9 +430,187 @@ def test_False_schema(self):
         message = self.message_for(
             instance="something",
             schema=False,
-            cls=validators.Draft7Validator,
         )
-        self.assertIn("False schema does not allow 'something'", message)
+        self.assertEqual(message, "False schema does not allow 'something'")
+
+    def test_multipleOf(self):
+        message = self.message_for(
+            instance=3,
+            schema={"multipleOf": 2},
+        )
+        self.assertEqual(message, "3 is not a multiple of 2")
+
+    def test_minItems(self):
+        message = self.message_for(instance=[], schema={"minItems": 2})
+        self.assertEqual(message, "[] is too short")
+
+    def test_maxItems(self):
+        message = self.message_for(instance=[1, 2, 3], schema={"maxItems": 2})
+        self.assertEqual(message, "[1, 2, 3] is too long")
+
+    def test_prefixItems_with_items(self):
+        message = self.message_for(
+            instance=[1, 2, "foo", 5],
+            schema={"items": False, "prefixItems": [{}, {}]},
+        )
+        self.assertEqual(message, "Expected at most 2 items, but found 4")
+
+    def test_minLength(self):
+        message = self.message_for(
+            instance="",
+            schema={"minLength": 2},
+        )
+        self.assertEqual(message, "'' is too short")
+
+    def test_maxLength(self):
+        message = self.message_for(
+            instance="abc",
+            schema={"maxLength": 2},
+        )
+        self.assertEqual(message, "'abc' is too long")
+
+    def test_pattern(self):
+        message = self.message_for(
+            instance="bbb",
+            schema={"pattern": "^a*$"},
+        )
+        self.assertEqual(message, "'bbb' does not match '^a*$'")
+
+    def test_does_not_contain(self):
+        message = self.message_for(
+            instance=[],
+            schema={"contains": {"type": "string"}},
+        )
+        self.assertEqual(
+            message,
+            "[] does not contain items matching the given schema",
+        )
+
+    def test_contains_too_few(self):
+        message = self.message_for(
+            instance=["foo", 1],
+            schema={"contains": {"type": "string"}, "minContains": 2},
+        )
+        self.assertEqual(
+            message,
+            "Too few items match the given schema "
+            "(expected at least 2 but only 1 matched)",
+        )
+
+    def test_contains_too_few_both_constrained(self):
+        message = self.message_for(
+            instance=["foo", 1],
+            schema={
+                "contains": {"type": "string"},
+                "minContains": 2,
+                "maxContains": 4,
+            },
+        )
+        self.assertEqual(
+            message,
+            "Too few items match the given schema (expected at least 2 but "
+            "only 1 matched)",
+        )
+
+    def test_contains_too_many(self):
+        message = self.message_for(
+            instance=["foo", "bar", "baz"],
+            schema={"contains": {"type": "string"}, "maxContains": 2},
+        )
+        self.assertEqual(
+            message,
+            "Too many items match the given schema (expected at most 2)",
+        )
+
+    def test_contains_too_many_both_constrained(self):
+        message = self.message_for(
+            instance=["foo"] * 5,
+            schema={
+                "contains": {"type": "string"},
+                "minContains": 2,
+                "maxContains": 4,
+            },
+        )
+        self.assertEqual(
+            message,
+            "Too many items match the given schema (expected at most 4)",
+        )
+
+    def test_exclusiveMinimum(self):
+        message = self.message_for(
+            instance=3,
+            schema={"exclusiveMinimum": 5},
+        )
+        self.assertEqual(
+            message,
+            "3 is less than or equal to the minimum of 5",
+        )
+
+    def test_exclusiveMaximum(self):
+        message = self.message_for(instance=3, schema={"exclusiveMaximum": 2})
+        self.assertEqual(
+            message,
+            "3 is greater than or equal to the maximum of 2",
+        )
+
+    def test_required(self):
+        message = self.message_for(instance={}, schema={"required": ["foo"]})
+        self.assertEqual(message, "'foo' is a required property")
+
+    def test_dependentRequired(self):
+        message = self.message_for(
+            instance={"foo": {}},
+            schema={"dependentRequired": {"foo": ["bar"]}},
+        )
+        self.assertEqual(message, "'bar' is a dependency of 'foo'")
+
+    def test_minProperties(self):
+        message = self.message_for(instance={}, schema={"minProperties": 2})
+        self.assertEqual(message, "{} does not have enough properties")
+
+    def test_maxProperties(self):
+        message = self.message_for(
+            instance={"a": {}, "b": {}, "c": {}},
+            schema={"maxProperties": 2},
+        )
+        self.assertEqual(
+            message,
+            "{'a': {}, 'b': {}, 'c': {}} has too many properties",
+        )
+
+    def test_oneOf_matches_none(self):
+        message = self.message_for(instance={}, schema={"oneOf": [False]})
+        self.assertEqual(
+            message,
+            "{} is not valid under any of the given schemas",
+        )
+
+    def test_oneOf_matches_too_many(self):
+        message = self.message_for(instance={}, schema={"oneOf": [True, True]})
+        self.assertEqual(message, "{} is valid under each of True, True")
+
+    def test_unevaluated_items(self):
+        schema = {"type": "array", "unevaluatedItems": False}
+        message = self.message_for(instance=["foo", "bar"], schema=schema)
+        self.assertIn(
+            message,
+            "Unevaluated items are not allowed ('foo', 'bar' were unexpected)",
+        )
+
+    def test_unevaluated_properties(self):
+        schema = {"type": "object", "unevaluatedProperties": False}
+        message = self.message_for(
+            instance={
+                "foo": "foo",
+                "bar": "bar",
+            },
+            schema=schema,
+        )
+        self.assertEqual(
+            message,
+            "Unevaluated properties are not allowed "
+            "('foo', 'bar' were unexpected)",
+        )
 
 
 class TestValidationErrorDetails(TestCase):
@@ -630,6 +639,7 @@ def test_anyOf(self):
         self.assertEqual(e.path, deque([]))
         self.assertEqual(e.relative_path, deque([]))
         self.assertEqual(e.absolute_path, deque([]))
+        self.assertEqual(e.json_path, "$")
 
         self.assertEqual(e.schema_path, deque(["anyOf"]))
         self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
@@ -648,6 +658,7 @@ def test_anyOf(self):
         self.assertEqual(e1.path, deque([]))
         self.assertEqual(e1.absolute_path, deque([]))
         self.assertEqual(e1.relative_path, deque([]))
+        self.assertEqual(e1.json_path, "$")
 
         self.assertEqual(e1.schema_path, deque([0, "minimum"]))
         self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
@@ -666,6 +677,7 @@ def test_anyOf(self):
         self.assertEqual(e2.path, deque([]))
         self.assertEqual(e2.relative_path, deque([]))
         self.assertEqual(e2.absolute_path, deque([]))
+        self.assertEqual(e2.json_path, "$")
 
         self.assertEqual(e2.schema_path, deque([1, "type"]))
         self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
@@ -699,6 +711,7 @@ def test_type(self):
         self.assertEqual(e.path, deque([]))
         self.assertEqual(e.relative_path, deque([]))
         self.assertEqual(e.absolute_path, deque([]))
+        self.assertEqual(e.json_path, "$")
 
         self.assertEqual(e.schema_path, deque(["type"]))
         self.assertEqual(e.relative_schema_path, deque(["type"]))
@@ -717,6 +730,7 @@ def test_type(self):
         self.assertEqual(e1.path, deque([]))
         self.assertEqual(e1.relative_path, deque([]))
         self.assertEqual(e1.absolute_path, deque([]))
+        self.assertEqual(e1.json_path, "$")
 
         self.assertEqual(e1.schema_path, deque([0, "type"]))
         self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
@@ -727,12 +741,13 @@ def test_type(self):
         self.assertEqual(e2.validator, "enum")
         self.assertEqual(e2.validator_value, [2])
         self.assertEqual(e2.instance, 1)
-        self.assertEqual(e2.schema, {u"enum": [2]})
+        self.assertEqual(e2.schema, {"enum": [2]})
         self.assertIs(e2.parent, e)
 
         self.assertEqual(e2.path, deque(["foo"]))
         self.assertEqual(e2.relative_path, deque(["foo"]))
         self.assertEqual(e2.absolute_path, deque(["foo"]))
+        self.assertEqual(e2.json_path, "$.foo")
 
         self.assertEqual(
             e2.schema_path,
             deque([1, "properties", "foo", "enum"]),
@@ -776,6 +791,11 @@ def test_single_nesting(self):
         self.assertEqual(e3.absolute_path, deque(["baz"]))
         self.assertEqual(e4.absolute_path, deque(["foo"]))
 
+        self.assertEqual(e1.json_path, "$.bar")
+        self.assertEqual(e2.json_path, "$.baz")
+        self.assertEqual(e3.json_path, "$.baz")
+        self.assertEqual(e4.json_path, "$.foo")
+
         self.assertEqual(e1.validator, "minItems")
         self.assertEqual(e2.validator, "enum")
         self.assertEqual(e3.validator, "maximum")
@@ -811,6 +831,13 @@ def test_multiple_nesting(self):
         self.assertEqual(e5.path, deque([1, "bar", "baz"]))
         self.assertEqual(e6.path, deque([1, "foo"]))
 
+        self.assertEqual(e1.json_path, "$")
+        self.assertEqual(e2.json_path, "$[0]")
+        self.assertEqual(e3.json_path, "$[1].bar")
+        self.assertEqual(e4.json_path, "$[1].bar.bar")
+        self.assertEqual(e5.json_path, "$[1].bar.baz")
+        self.assertEqual(e6.json_path, "$[1].foo")
+
         self.assertEqual(e1.schema_path, deque(["type"]))
         self.assertEqual(e2.schema_path, deque(["items", "type"]))
         self.assertEqual(
@@ -822,7 +849,7 @@ def test_multiple_nesting(self):
         )
         self.assertEqual(
             list(e5.schema_path),
-            ["items", "properties", "bar", "properties", "baz", "minItems"]
+            ["items", "properties", "bar", "properties", "baz", "minItems"],
         )
         self.assertEqual(
             list(e6.schema_path), ["items", "properties", "foo", "enum"],
@@ -886,6 +913,7 @@ def test_recursive(self):
         self.assertEqual(
             e.absolute_schema_path, deque(["properties", "root", "anyOf"]),
         )
+        self.assertEqual(e.json_path, "$.root")
 
         e1, = e.context
         self.assertEqual(e1.absolute_path, deque(["root", "children", "a"]))
@@ -904,6 +932,7 @@ def test_recursive(self):
                 ],
             ),
         )
+        self.assertEqual(e1.json_path, "$.root.children.a")
 
         e2, = e1.context
         self.assertEqual(
@@ -932,6 +961,7 @@ def test_recursive(self):
                 ],
             ),
         )
+        self.assertEqual(e2.json_path, "$.root.children.a.children.ab")
 
     def test_additionalProperties(self):
         instance = {"bar": "bar", "foo": 2}
-944,6 +974,9 @@ def test_additionalProperties(self): self.assertEqual(e1.path, deque(["bar"])) self.assertEqual(e2.path, deque(["foo"])) + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.foo") + self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") @@ -963,6 +996,9 @@ def test_patternProperties(self): self.assertEqual(e1.path, deque(["bar"])) self.assertEqual(e2.path, deque(["foo"])) + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.foo") + self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") @@ -980,6 +1016,9 @@ def test_additionalItems(self): self.assertEqual(e1.path, deque([0])) self.assertEqual(e2.path, deque([1])) + self.assertEqual(e1.json_path, "$[0]") + self.assertEqual(e2.json_path, "$[1]") + self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") @@ -997,6 +1036,9 @@ def test_additionalItems_with_items(self): self.assertEqual(e1.path, deque([1])) self.assertEqual(e2.path, deque([2])) + self.assertEqual(e1.json_path, "$[1]") + self.assertEqual(e2.json_path, "$[2]") + self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") @@ -1010,9 +1052,10 @@ def test_propertyNames(self): self.assertEqual(error.validator, "not") self.assertEqual( error.message, - "%r is not allowed for %r" % ({"const": "foo"}, "foo"), + "'foo' should not be valid under {'const': 'foo'}", ) self.assertEqual(error.path, deque([])) + self.assertEqual(error.json_path, "$") self.assertEqual(error.schema_path, deque(["propertyNames", "not"])) def test_if_then(self): @@ -1027,7 +1070,8 @@ def test_if_then(self): self.assertEqual(error.validator, "const") self.assertEqual(error.message, "13 was expected") self.assertEqual(error.path, deque([])) - self.assertEqual(error.schema_path, deque(["if", "then", "const"])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["then", "const"])) def test_if_else(self): schema = { @@ -1041,7 +1085,8 @@ def test_if_else(self): self.assertEqual(error.validator, "const") self.assertEqual(error.message, "13 was expected") self.assertEqual(error.path, deque([])) - self.assertEqual(error.schema_path, deque(["if", "else", "const"])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["else", "const"])) def test_boolean_schema_False(self): validator = validators.Draft7Validator(False) @@ -1055,6 +1100,7 @@ def test_boolean_schema_False(self): error.instance, error.schema, error.schema_path, + error.json_path, ), ( "False schema does not allow 12", @@ -1063,6 +1109,7 @@ def test_boolean_schema_False(self): 12, False, deque([]), + "$", ), ) @@ -1083,6 +1130,7 @@ def test_ref(self): error.absolute_path, error.schema, error.schema_path, + error.json_path, ), ( "'notAnInteger' is not of type 'integer'", @@ -1092,6 +1140,258 @@ def test_ref(self): deque(["foo"]), {"type": "integer"}, deque(["additionalProperties", "type"]), + "$.foo", + ), + ) + + def test_prefixItems(self): + schema = {"prefixItems": [{"type": "string"}, {}, {}, {"maximum": 3}]} + validator = validators.Draft202012Validator(schema) + type_error, min_error = validator.iter_errors([1, 2, "foo", 5]) + self.assertEqual( + ( + type_error.message, + type_error.validator, + type_error.validator_value, + type_error.instance, + type_error.absolute_path, + type_error.schema, + type_error.schema_path, + type_error.json_path, + ), + ( + "1 is not of type 'string'", + "type", + "string", + 1, + deque([0]), + {"type": 
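The `prefixItems` tests being added here cover a keyword that exists only in Draft 2020-12, where it replaces the old tuple form of `items`. A sketch of the behaviour under test, assuming upstream jsonschema 4.x semantics:

    from jsonschema import Draft202012Validator

    schema = {"prefixItems": [{"type": "string"}, {}, {}, {"maximum": 3}]}
    errors = list(Draft202012Validator(schema).iter_errors([1, 2, "foo", 5]))
    # Two errors, as asserted above: "$[0]" is not a string and "$[3]"
    # exceeds the maximum of 3.
    for error in errors:
        print(error.json_path, error.message)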
"string"}, + deque(["prefixItems", 0, "type"]), + "$[0]", + ), + ) + self.assertEqual( + ( + min_error.message, + min_error.validator, + min_error.validator_value, + min_error.instance, + min_error.absolute_path, + min_error.schema, + min_error.schema_path, + min_error.json_path, + ), + ( + "5 is greater than the maximum of 3", + "maximum", + 3, + 5, + deque([3]), + {"maximum": 3}, + deque(["prefixItems", 3, "maximum"]), + "$[3]", + ), + ) + + def test_prefixItems_with_items(self): + schema = { + "items": {"type": "string"}, + "prefixItems": [{}], + } + validator = validators.Draft202012Validator(schema) + e1, e2 = validator.iter_errors(["foo", 2, "bar", 4, "baz"]) + self.assertEqual( + ( + e1.message, + e1.validator, + e1.validator_value, + e1.instance, + e1.absolute_path, + e1.schema, + e1.schema_path, + e1.json_path, + ), + ( + "2 is not of type 'string'", + "type", + "string", + 2, + deque([1]), + {"type": "string"}, + deque(["items", "type"]), + "$[1]", + ), + ) + self.assertEqual( + ( + e2.message, + e2.validator, + e2.validator_value, + e2.instance, + e2.absolute_path, + e2.schema, + e2.schema_path, + e2.json_path, + ), + ( + "4 is not of type 'string'", + "type", + "string", + 4, + deque([3]), + {"type": "string"}, + deque(["items", "type"]), + "$[3]", + ), + ) + + def test_contains_too_many(self): + """ + `contains` + `maxContains` produces only one error, even if there are + many more incorrectly matching elements. + """ + schema = {"contains": {"type": "string"}, "maxContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors(["foo", 2, "bar", 4, "baz", "quux"]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "Too many items match the given schema (expected at most 2)", + "maxContains", + 2, + ["foo", 2, "bar", 4, "baz", "quux"], + deque([]), + {"contains": {"type": "string"}, "maxContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_contains_too_few(self): + schema = {"contains": {"type": "string"}, "minContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors(["foo", 2, 4]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + ( + "Too few items match the given schema " + "(expected at least 2 but only 1 matched)" + ), + "minContains", + 2, + ["foo", 2, 4], + deque([]), + {"contains": {"type": "string"}, "minContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_contains_none(self): + schema = {"contains": {"type": "string"}, "minContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors([2, 4]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "[2, 4] does not contain items matching the given schema", + "contains", + {"type": "string"}, + [2, 4], + deque([]), + {"contains": {"type": "string"}, "minContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_ref_sibling(self): + schema = { + "$defs": {"foo": {"required": ["bar"]}}, + "properties": { + "aprop": { + "$ref": "#/$defs/foo", + "required": ["baz"], + }, + }, + } + + validator = validators.Draft202012Validator(schema) + e1, e2 = 
validator.iter_errors({"aprop": {}}) + self.assertEqual( + ( + e1.message, + e1.validator, + e1.validator_value, + e1.instance, + e1.absolute_path, + e1.schema, + e1.schema_path, + e1.relative_schema_path, + e1.json_path, + ), + ( + "'bar' is a required property", + "required", + ["bar"], + {}, + deque(["aprop"]), + {"required": ["bar"]}, + deque(["properties", "aprop", "required"]), + deque(["properties", "aprop", "required"]), + "$.aprop", + ), + ) + self.assertEqual( + ( + e2.message, + e2.validator, + e2.validator_value, + e2.instance, + e2.absolute_path, + e2.schema, + e2.schema_path, + e2.relative_schema_path, + e2.json_path, + ), + ( + "'baz' is a required property", + "required", + ["baz"], + {}, + deque(["aprop"]), + {"$ref": "#/$defs/foo", "required": ["baz"]}, + deque(["properties", "aprop", "required"]), + deque(["properties", "aprop", "required"]), + "$.aprop", ), ) @@ -1125,6 +1425,9 @@ def test_enum_allows_non_unique_items(self): class ValidatorTestMixin(MetaSchemaTestsMixin, object): + def test_it_implements_the_validator_protocol(self): + self.assertIsInstance(self.Validator({}), protocols.Validator) + def test_valid_instances_are_valid(self): schema, instance = self.valid self.assertTrue(self.Validator(schema).is_valid(instance)) @@ -1178,16 +1481,6 @@ def test_is_type_evades_bool_inheriting_from_int(self): self.assertFalse(self.Validator({}).is_type(True, "integer")) self.assertFalse(self.Validator({}).is_type(True, "number")) - @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode") - def test_string_a_bytestring_is_a_string(self): - self.Validator({"type": "string"}).validate(b"foo") - - def test_patterns_can_be_native_strings(self): - """ - See https://github.com/Julian/jsonschema/issues/611. - """ - self.Validator({"pattern": "foo"}).validate("foo") - def test_it_can_validate_with_decimals(self): schema = {"items": {"type": "number"}} Validator = validators.extend( @@ -1197,7 +1490,7 @@ def test_it_can_validate_with_decimals(self): lambda checker, thing: isinstance( thing, (int, float, Decimal), ) and not isinstance(thing, bool), - ) + ), ) validator = Validator(schema) @@ -1251,7 +1544,7 @@ def test_non_string_custom_type(self): type_checker=self.Validator.TYPE_CHECKER.redefine( non_string_type, lambda checker, thing: isinstance(thing, int), - ) + ), ) Crazy(schema).validate(15) @@ -1266,12 +1559,78 @@ def test_it_properly_formats_tuples_in_errors(self): type_checker=self.Validator.TYPE_CHECKER.redefine( "array", lambda checker, thing: isinstance(thing, tuple), - ) + ), ) with self.assertRaises(exceptions.ValidationError) as e: TupleValidator({"uniqueItems": True}).validate((1, 1)) self.assertIn("(1, 1) has non-unique elements", str(e.exception)) + def test_check_redefined_sequence(self): + """ + Allow array to validate against another defined sequence type + """ + schema = {"type": "array", "uniqueItems": True} + MyMapping = namedtuple("MyMapping", "a, b") + Validator = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine_many( + { + "array": lambda checker, thing: isinstance( + thing, (list, deque), + ), + "object": lambda checker, thing: isinstance( + thing, (dict, MyMapping), + ), + }, + ), + ) + validator = Validator(schema) + + valid_instances = [ + deque(["a", None, "1", "", True]), + deque([[False], [0]]), + [deque([False]), deque([0])], + [[deque([False])], [deque([0])]], + [[[[[deque([False])]]]], [[[[deque([0])]]]]], + [deque([deque([False])]), deque([deque([0])])], + [MyMapping("a", 0), MyMapping("a", 
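test_check_redefined_sequence (started above) relies on `TYPE_CHECKER.redefine_many` so that sequence types other than `list` validate as JSON arrays. A stripped-down sketch of the same extension mechanism, independent of this patch:

    from collections import deque
    from jsonschema import Draft202012Validator, validators

    CustomValidator = validators.extend(
        Draft202012Validator,
        type_checker=Draft202012Validator.TYPE_CHECKER.redefine(
            "array",
            lambda checker, thing: isinstance(thing, (list, deque)),
        ),
    )
    # deques now pass (and fail) the array keywords like ordinary lists
    CustomValidator({"type": "array", "uniqueItems": True}).validate(deque([1, 2]))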
False)], + [ + MyMapping("a", [deque([0])]), + MyMapping("a", [deque([False])]), + ], + [ + MyMapping("a", [MyMapping("a", deque([0]))]), + MyMapping("a", [MyMapping("a", deque([False]))]), + ], + [deque(deque(deque([False]))), deque(deque(deque([0])))], + ] + + for instance in valid_instances: + validator.validate(instance) + + invalid_instances = [ + deque(["a", "b", "a"]), + deque([[False], [False]]), + [deque([False]), deque([False])], + [[deque([False])], [deque([False])]], + [[[[[deque([False])]]]], [[[[deque([False])]]]]], + [deque([deque([False])]), deque([deque([False])])], + [MyMapping("a", False), MyMapping("a", False)], + [ + MyMapping("a", [deque([False])]), + MyMapping("a", [deque([False])]), + ], + [ + MyMapping("a", [MyMapping("a", deque([False]))]), + MyMapping("a", [MyMapping("a", deque([False]))]), + ], + [deque(deque(deque([False]))), deque(deque(deque([False])))], + ] + + for instance in invalid_instances: + with self.assertRaises(exceptions.ValidationError): + validator.validate(instance) + class AntiDraft6LeakMixin(object): """ @@ -1305,7 +1664,7 @@ def test_False_is_not_a_schema_even_if_you_forget_to_check(self): class TestDraft3Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): Validator = validators.Draft3Validator - valid = {}, {} + valid: tuple[dict, dict] = ({}, {}) invalid = {"type": "integer"}, "foo" def test_any_type_is_valid_for_type_any(self): @@ -1320,7 +1679,7 @@ def test_any_type_is_redefinable(self): self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine( "any", lambda checker, thing: isinstance(thing, int), - ) + ), ) validator = Crazy({"type": "any"}) validator.validate(12) @@ -1328,32 +1687,44 @@ def test_any_type_is_redefinable(self): validator.validate("foo") def test_is_type_is_true_for_any_type(self): - self.assertTrue(self.Validator({}).is_valid(object(), {"type": "any"})) + self.assertTrue(self.Validator({"type": "any"}).is_valid(object())) def test_is_type_does_not_evade_bool_if_it_is_being_tested(self): self.assertTrue(self.Validator({}).is_type(True, "boolean")) - self.assertTrue(self.Validator({}).is_valid(True, {"type": "any"})) + self.assertTrue(self.Validator({"type": "any"}).is_valid(True)) class TestDraft4Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): Validator = validators.Draft4Validator - valid = {}, {} + valid: tuple[dict, dict] = ({}, {}) invalid = {"type": "integer"}, "foo" class TestDraft6Validator(ValidatorTestMixin, TestCase): Validator = validators.Draft6Validator - valid = {}, {} + valid: tuple[dict, dict] = ({}, {}) invalid = {"type": "integer"}, "foo" class TestDraft7Validator(ValidatorTestMixin, TestCase): Validator = validators.Draft7Validator - valid = {}, {} + valid: tuple[dict, dict] = ({}, {}) invalid = {"type": "integer"}, "foo" -class TestValidatorFor(SynchronousTestCase): +class TestDraft201909Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft201909Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft202012Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft202012Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestValidatorFor(TestCase): def test_draft_3(self): schema = {"$schema": "http://json-schema.org/draft-03/schema"} self.assertIs( @@ -1406,6 +1777,32 @@ def test_draft_7(self): validators.Draft7Validator, ) + def test_draft_201909(self): + schema = {"$schema": "https://json-schema.org/draft/2019-09/schema"} + self.assertIs( + 
validators.validator_for(schema), + validators.Draft201909Validator, + ) + + schema = {"$schema": "https://json-schema.org/draft/2019-09/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft201909Validator, + ) + + def test_draft_202012(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft202012Validator, + ) + + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft202012Validator, + ) + def test_True(self): self.assertIs( validators.validator_for(True), @@ -1448,31 +1845,29 @@ def test_validator_for_custom_default(self): self.assertIs(validators.validator_for({}, default=None), None) def test_warns_if_meta_schema_specified_was_not_found(self): - self.assertWarns( - category=DeprecationWarning, - message=( - "The metaschema specified by $schema was not found. " - "Using the latest draft to validate, but this will raise " - "an error in the future." - ), - # https://tm.tl/9363 :'( - filename=sys.modules[self.assertWarns.__module__].__file__, + with self.assertWarns(DeprecationWarning) as cm: + validators.validator_for(schema={"$schema": "unknownSchema"}) - f=validators.validator_for, - schema={u"$schema": "unknownSchema"}, - default={}, + self.assertEqual(cm.filename, __file__) + self.assertEqual( + str(cm.warning), + "The metaschema specified by $schema was not found. " + "Using the latest draft to validate, but this will raise " + "an error in the future.", ) def test_does_not_warn_if_meta_schema_is_unspecified(self): - validators.validator_for(schema={}, default={}), - self.assertFalse(self.flushWarnings()) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + validators.validator_for(schema={}, default={}) + self.assertFalse(w) -class TestValidate(SynchronousTestCase): +class TestValidate(TestCase): def assertUses(self, schema, Validator): result = [] - self.patch(Validator, "check_schema", result.append) - validators.validate({}, schema) + with mock.patch.object(Validator, "check_schema", result.append): + validators.validate({}, schema) self.assertEqual(result, [schema]) def test_draft3_validator_is_chosen(self): @@ -1519,23 +1914,38 @@ def test_draft7_validator_is_chosen(self): Validator=validators.Draft7Validator, ) - def test_draft7_validator_is_the_default(self): - self.assertUses(schema={}, Validator=validators.Draft7Validator) + def test_draft202012_validator_is_chosen(self): + self.assertUses( + schema={ + "$schema": "https://json-schema.org/draft/2020-12/schema#", + }, + Validator=validators.Draft202012Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={ + "$schema": "https://json-schema.org/draft/2020-12/schema", + }, + Validator=validators.Draft202012Validator, + ) + + def test_draft202012_validator_is_the_default(self): + self.assertUses(schema={}, Validator=validators.Draft202012Validator) def test_validation_error_message(self): with self.assertRaises(exceptions.ValidationError) as e: validators.validate(12, {"type": "string"}) - self.assertRegexpMatches( + self.assertRegex( str(e.exception), - "(?s)Failed validating u?'.*' in schema.*On instance", + "(?s)Failed validating '.*' in schema.*On instance", ) def test_schema_error_message(self): with self.assertRaises(exceptions.SchemaError) as e: validators.validate(12, {"type": 12}) - self.assertRegexpMatches( + self.assertRegex( 
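The TestValidatorFor additions pin down how `validator_for` maps `$schema` URIs, with or without the trailing empty fragment, onto validator classes, and that an unrecognized URI warns and falls back to the latest supported draft. For example (a sketch, assuming upstream jsonschema 4.x behaviour):

    from jsonschema import validators

    cls = validators.validator_for(
        {"$schema": "https://json-schema.org/draft/2020-12/schema"},
    )
    print(cls.__name__)  # Draft202012Validator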
str(e.exception), - "(?s)Failed validating u?'.*' in metaschema.*On schema", + "(?s)Failed validating '.*' in metaschema.*On schema", ) def test_it_uses_best_match(self): @@ -1546,7 +1956,7 @@ def test_it_uses_best_match(self): self.assertIn("12 is not of type", str(e.exception)) -class TestRefResolver(SynchronousTestCase): +class TestRefResolver(TestCase): base_uri = "" stored_uri = "foo://stored" @@ -1561,14 +1971,11 @@ def setUp(self): def test_it_does_not_retrieve_schema_urls_from_the_network(self): ref = validators.Draft3Validator.META_SCHEMA["id"] - self.patch( - self.resolver, - "resolve_remote", - lambda *args, **kwargs: self.fail("Should not have been called!"), - ) - with self.resolver.resolving(ref) as resolved: - pass + with mock.patch.object(self.resolver, "resolve_remote") as patched: + with self.resolver.resolving(ref) as resolved: + pass self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA) + self.assertFalse(patched.called) def test_it_resolves_local_refs(self): ref = "#/properties/foo" @@ -1580,7 +1987,7 @@ def test_it_resolves_local_refs_with_id(self): schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}} resolver = validators.RefResolver.from_schema( schema, - id_of=lambda schema: schema.get(u"id", u""), + id_of=lambda schema: schema.get("id", ""), ) with resolver.resolving("#/a") as resolved: self.assertEqual(resolved, schema["a"]) @@ -1643,7 +2050,7 @@ def test_it_can_construct_a_base_uri_from_a_schema(self): schema = {"id": "foo"} resolver = validators.RefResolver.from_schema( schema, - id_of=lambda schema: schema.get(u"id", u""), + id_of=lambda schema: schema.get("id", ""), ) self.assertEqual(resolver.base_uri, "foo") self.assertEqual(resolver.resolution_scope, "foo") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/validators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/validators.py old mode 100755 new mode 100644 index 1dc420c..a689e51 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/validators.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema/validators.py @@ -1,14 +1,21 @@ """ Creation and extension of validators, with implementations for existing drafts. """ -from __future__ import division +from __future__ import annotations +from collections import deque +from collections.abc import Sequence +from functools import lru_cache +from urllib.parse import unquote, urldefrag, urljoin, urlsplit +from urllib.request import urlopen from warnings import warn import contextlib import json -import numbers +import reprlib +import typing +import warnings -from six import add_metaclass +import attr from jsonschema import ( _legacy_validators, @@ -17,89 +24,39 @@ _validators, exceptions, ) -from jsonschema.compat import ( - Sequence, - int_types, - iteritems, - lru_cache, - str_types, - unquote, - urldefrag, - urljoin, - urlopen, - urlsplit, -) - -# Sigh. https://gitlab.com/pycqa/flake8/issues/280 -# https://github.com/pyga/ebb-lint/issues/7 -# Imported for backwards compatibility. -from jsonschema.exceptions import ErrorTree -ErrorTree - - -class _DontDoThat(Exception): - """ - Raised when a Validators with non-default type checker is misused. - - Asking one for DEFAULT_TYPES doesn't make sense, since type checkers - exist for the unrepresentable cases where DEFAULT_TYPES can't - represent the type relationship. 
- """ - - def __str__(self): - return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers" - - -validators = {} -meta_schemas = _utils.URIDict() - - -def _generate_legacy_type_checks(types=()): - """ - Generate newer-style type checks out of JSON-type-name-to-type mappings. - - Arguments: - types (dict): +_VALIDATORS: dict[str, typing.Any] = {} +_META_SCHEMAS = _utils.URIDict() +_VOCABULARIES: list[tuple[str, typing.Any]] = [] - A mapping of type names to their Python types - Returns: - - A dictionary of definitions to pass to `TypeChecker` - """ - types = dict(types) - - def gen_type_check(pytypes): - pytypes = _utils.flatten(pytypes) - - def type_check(checker, instance): - if isinstance(instance, bool): - if bool not in pytypes: - return False - return isinstance(instance, pytypes) - - return type_check - - definitions = {} - for typename, pytypes in iteritems(types): - definitions[typename] = gen_type_check(pytypes) - - return definitions - - -_DEPRECATED_DEFAULT_TYPES = { - u"array": list, - u"boolean": bool, - u"integer": int_types, - u"null": type(None), - u"number": numbers.Number, - u"object": dict, - u"string": str_types, -} -_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker( - type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES), -) +def __getattr__(name): + if name == "ErrorTree": + warnings.warn( + "Importing ErrorTree from jsonschema.validators is deprecated. " + "Instead import it from jsonschema.exceptions.", + DeprecationWarning, + stacklevel=2, + ) + from jsonschema.exceptions import ErrorTree + return ErrorTree + elif name == "validators": + warnings.warn( + "Accessing jsonschema.validators.validators is deprecated. " + "Use jsonschema.validators.validator_for with a given schema.", + DeprecationWarning, + stacklevel=2, + ) + return _VALIDATORS + elif name == "meta_schemas": + warnings.warn( + "Accessing jsonschema.validators.meta_schemas is deprecated. " + "Use jsonschema.validators.validator_for with a given schema.", + DeprecationWarning, + stacklevel=2, + ) + return _META_SCHEMAS + raise AttributeError(f"module {__name__} has no attribute {name}") def validates(version): @@ -117,63 +74,54 @@ def validates(version): Returns: - collections.Callable: + collections.abc.Callable: a class decorator to decorate the validator with the version """ def _validates(cls): - validators[version] = cls + _VALIDATORS[version] = cls meta_schema_id = cls.ID_OF(cls.META_SCHEMA) - if meta_schema_id: - meta_schemas[meta_schema_id] = cls + _META_SCHEMAS[meta_schema_id] = cls return cls return _validates -def _DEFAULT_TYPES(self): - if self._CREATED_WITH_DEFAULT_TYPES is None: - raise _DontDoThat() - - warn( - ( - "The DEFAULT_TYPES attribute is deprecated. " - "See the type checker attached to this validator instead." - ), - DeprecationWarning, - stacklevel=2, - ) - return self._DEFAULT_TYPES - - -class _DefaultTypesDeprecatingMetaClass(type): - DEFAULT_TYPES = property(_DEFAULT_TYPES) - - def _id_of(schema): + """ + Return the ID of a schema for recent JSON Schema drafts. 
+ """ if schema is True or schema is False: - return u"" - return schema.get(u"$id", u"") + return "" + return schema.get("$id", "") + + +def _store_schema_list(): + if not _VOCABULARIES: + _VOCABULARIES.extend(_utils.load_schema("vocabularies").items()) + return [ + (id, validator.META_SCHEMA) for id, validator in _META_SCHEMAS.items() + ] + _VOCABULARIES def create( meta_schema, validators=(), version=None, - default_types=None, - type_checker=None, + type_checker=_types.draft7_type_checker, id_of=_id_of, + applicable_validators=lambda schema: schema.items(), ): """ Create a new validator class. Arguments: - meta_schema (collections.Mapping): + meta_schema (collections.abc.Mapping): the meta schema for the new validator class - validators (collections.Mapping): + validators (collections.abc.Mapping): a mapping from names to callables, where each callable will validate the schema property with the given name. @@ -201,107 +149,66 @@ def create( If unprovided, a `jsonschema.TypeChecker` will be created with a set of default types typical of JSON Schema drafts. - default_types (collections.Mapping): - - .. deprecated:: 3.0.0 - - Please use the type_checker argument instead. + id_of (collections.abc.Callable): - If set, it provides mappings of JSON types to Python types - that will be converted to functions and redefined in this - object's `jsonschema.TypeChecker`. + A function that given a schema, returns its ID. - id_of (collections.Callable): + applicable_validators (collections.abc.Callable): - A function that given a schema, returns its ID. + A function that given a schema, returns the list of applicable + validators (names and callables) which will be called to + validate the instance. Returns: - a new `jsonschema.IValidator` class + a new `jsonschema.protocols.Validator` class """ - if default_types is not None: - if type_checker is not None: - raise TypeError( - "Do not specify default_types when providing a type checker.", - ) - _created_with_default_types = True - warn( - ( - "The default_types argument is deprecated. " - "Use the type_checker argument instead." - ), - DeprecationWarning, - stacklevel=2, - ) - type_checker = _types.TypeChecker( - type_checkers=_generate_legacy_type_checks(default_types), - ) - else: - default_types = _DEPRECATED_DEFAULT_TYPES - if type_checker is None: - _created_with_default_types = False - type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES - elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES: - _created_with_default_types = False - else: - _created_with_default_types = None - - @add_metaclass(_DefaultTypesDeprecatingMetaClass) - class Validator(object): + @attr.s + class Validator: VALIDATORS = dict(validators) META_SCHEMA = dict(meta_schema) TYPE_CHECKER = type_checker ID_OF = staticmethod(id_of) - DEFAULT_TYPES = property(_DEFAULT_TYPES) - _DEFAULT_TYPES = dict(default_types) - _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types + schema = attr.ib(repr=reprlib.repr) + resolver = attr.ib(default=None, repr=False) + format_checker = attr.ib(default=None) + evolve = attr.evolve - def __init__( - self, - schema, - types=(), - resolver=None, - format_checker=None, - ): - if types: - warn( - ( - "The types argument is deprecated. Provide " - "a type_checker to jsonschema.validators.extend " - "instead." 
- ), - DeprecationWarning, - stacklevel=2, + def __attrs_post_init__(self): + if self.resolver is None: + self.resolver = RefResolver.from_schema( + self.schema, + id_of=id_of, ) - self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many( - _generate_legacy_type_checks(types), - ) - - if resolver is None: - resolver = RefResolver.from_schema(schema, id_of=id_of) - - self.resolver = resolver - self.format_checker = format_checker - self.schema = schema - @classmethod def check_schema(cls, schema): for error in cls(cls.META_SCHEMA).iter_errors(schema): raise exceptions.SchemaError.create_from(error) def iter_errors(self, instance, _schema=None): - if _schema is None: + if _schema is not None: + warnings.warn( + ( + "Passing a schema to Validator.iter_errors " + "is deprecated and will be removed in a future " + "release. Call validator.evolve(schema=new_schema)." + "iter_errors(...) instead." + ), + DeprecationWarning, + stacklevel=2, + ) + else: _schema = self.schema if _schema is True: return elif _schema is False: yield exceptions.ValidationError( - "False schema does not allow %r" % (instance,), + f"False schema does not allow {instance!r}", validator=None, validator_value=None, instance=instance, @@ -313,13 +220,7 @@ def iter_errors(self, instance, _schema=None): if scope: self.resolver.push_scope(scope) try: - ref = _schema.get(u"$ref") - if ref is not None: - validators = [(u"$ref", ref)] - else: - validators = iteritems(_schema) - - for k, v in validators: + for k, v in applicable_validators(_schema): validator = self.VALIDATORS.get(k) if validator is None: continue @@ -333,7 +234,7 @@ def iter_errors(self, instance, _schema=None): instance=instance, schema=_schema, ) - if k != u"$ref": + if k not in {"if", "$ref"}: error.schema_path.appendleft(k) yield error finally: @@ -341,7 +242,7 @@ def iter_errors(self, instance, _schema=None): self.resolver.pop_scope() def descend(self, instance, schema, path=None, schema_path=None): - for error in self.iter_errors(instance, schema): + for error in self.evolve(schema=schema).iter_errors(instance): if path is not None: error.path.appendleft(path) if schema_path is not None: @@ -359,12 +260,26 @@ def is_type(self, instance, type): raise exceptions.UnknownType(type, instance, self.schema) def is_valid(self, instance, _schema=None): - error = next(self.iter_errors(instance, _schema), None) + if _schema is not None: + warnings.warn( + ( + "Passing a schema to Validator.is_valid is deprecated " + "and will be removed in a future release. Call " + "validator.evolve(schema=new_schema).is_valid(...) " + "instead." + ), + DeprecationWarning, + stacklevel=2, + ) + self = self.evolve(schema=_schema) + + error = next(self.iter_errors(instance), None) return error is None if version is not None: + safe = version.title().replace(" ", "").replace("-", "") + Validator.__name__ = Validator.__qualname__ = f"{safe}Validator" Validator = validates(version)(Validator) - Validator.__name__ = version.title().replace(" ", "") + "Validator" return Validator @@ -375,11 +290,11 @@ def extend(validator, validators=(), version=None, type_checker=None): Arguments: - validator (jsonschema.IValidator): + validator (jsonschema.protocols.Validator): an existing validator class - validators (collections.Mapping): + validators (collections.abc.Mapping): a mapping of new validator callables to extend with, whose structure is as in `create`. 
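The reworked `extend` docstring above describes the supported way to bolt extra keywords onto an existing validator class. A short sketch under those rules; the `even` keyword and its callable are purely hypothetical, used only for illustration:

    from jsonschema import Draft202012Validator, exceptions, validators

    def even(validator, value, instance, schema):
        # hypothetical keyword: when true, integers must be even
        if value and validator.is_type(instance, "integer") and instance % 2:
            yield exceptions.ValidationError(f"{instance!r} is not even")

    EvenValidator = validators.extend(Draft202012Validator, {"even": even})
    EvenValidator({"even": True}).validate(2)    # passes
    # EvenValidator({"even": True}).validate(3)  # would raise ValidationError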
@@ -405,11 +320,12 @@ def extend(validator, validators=(), version=None, type_checker=None): a type checker, used when applying the :validator:`type` validator. If unprovided, the type checker of the extended - `jsonschema.IValidator` will be carried along.` + `jsonschema.protocols.Validator` will be carried along. Returns: - a new `jsonschema.IValidator` class extending the one provided + a new `jsonschema.protocols.Validator` class extending the one + provided .. note:: Meta Schemas @@ -427,12 +343,6 @@ def extend(validator, validators=(), version=None, type_checker=None): if type_checker is None: type_checker = validator.TYPE_CHECKER - elif validator._CREATED_WITH_DEFAULT_TYPES: - raise TypeError( - "Cannot extend a validator created with default_types " - "with a type_checker. Update the validator to use a " - "type_checker when created." - ) return create( meta_schema=validator.META_SCHEMA, validators=all_validators, @@ -445,148 +355,241 @@ def extend(validator, validators=(), version=None, type_checker=None): Draft3Validator = create( meta_schema=_utils.load_schema("draft3"), validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"dependencies": _legacy_validators.dependencies_draft3, - u"disallow": _legacy_validators.disallow_draft3, - u"divisibleBy": _validators.multipleOf, - u"enum": _validators.enum, - u"extends": _legacy_validators.extends_draft3, - u"format": _validators.format, - u"items": _legacy_validators.items_draft3_draft4, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maximum": _legacy_validators.maximum_draft3_draft4, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minimum": _legacy_validators.minimum_draft3_draft4, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _legacy_validators.properties_draft3, - u"type": _legacy_validators.type_draft3, - u"uniqueItems": _validators.uniqueItems, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "dependencies": _legacy_validators.dependencies_draft3, + "disallow": _legacy_validators.disallow_draft3, + "divisibleBy": _validators.multipleOf, + "enum": _validators.enum, + "extends": _legacy_validators.extends_draft3, + "format": _validators.format, + "items": _legacy_validators.items_draft3_draft4, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maximum": _legacy_validators.maximum_draft3_draft4, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minimum": _legacy_validators.minimum_draft3_draft4, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _legacy_validators.properties_draft3, + "type": _legacy_validators.type_draft3, + "uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft3_type_checker, version="draft3", - id_of=lambda schema: schema.get(u"id", ""), + id_of=lambda schema: schema.get("id", ""), + applicable_validators=_legacy_validators.ignore_ref_siblings, ) Draft4Validator = create( meta_schema=_utils.load_schema("draft4"), validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"allOf": _validators.allOf, - u"anyOf": _validators.anyOf, - u"dependencies": _validators.dependencies, - u"enum": 
_validators.enum, - u"format": _validators.format, - u"items": _legacy_validators.items_draft3_draft4, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maxProperties": _validators.maxProperties, - u"maximum": _legacy_validators.maximum_draft3_draft4, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minProperties": _validators.minProperties, - u"minimum": _legacy_validators.minimum_draft3_draft4, - u"multipleOf": _validators.multipleOf, - u"not": _validators.not_, - u"oneOf": _validators.oneOf, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _validators.properties, - u"required": _validators.required, - u"type": _validators.type, - u"uniqueItems": _validators.uniqueItems, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7, + "enum": _validators.enum, + "format": _validators.format, + "items": _legacy_validators.items_draft3_draft4, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _legacy_validators.maximum_draft3_draft4, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _legacy_validators.minimum_draft3_draft4, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "required": _validators.required, + "type": _validators.type, + "uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft4_type_checker, version="draft4", - id_of=lambda schema: schema.get(u"id", ""), + id_of=lambda schema: schema.get("id", ""), + applicable_validators=_legacy_validators.ignore_ref_siblings, ) Draft6Validator = create( meta_schema=_utils.load_schema("draft6"), validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"allOf": _validators.allOf, - u"anyOf": _validators.anyOf, - u"const": _validators.const, - u"contains": _validators.contains, - u"dependencies": _validators.dependencies, - u"enum": _validators.enum, - u"exclusiveMaximum": _validators.exclusiveMaximum, - u"exclusiveMinimum": _validators.exclusiveMinimum, - u"format": _validators.format, - u"items": _validators.items, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maxProperties": _validators.maxProperties, - u"maximum": _validators.maximum, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minProperties": _validators.minProperties, - u"minimum": _validators.minimum, - u"multipleOf": _validators.multipleOf, - u"not": _validators.not_, - u"oneOf": _validators.oneOf, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _validators.properties, - u"propertyNames": _validators.propertyNames, - u"required": _validators.required, - u"type": _validators.type, - u"uniqueItems": _validators.uniqueItems, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + 
"anyOf": _validators.anyOf, + "const": _validators.const, + "contains": _legacy_validators.contains_draft6_draft7, + "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "items": _legacy_validators.items_draft6_draft7_draft201909, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft6_type_checker, version="draft6", + applicable_validators=_legacy_validators.ignore_ref_siblings, ) Draft7Validator = create( meta_schema=_utils.load_schema("draft7"), validators={ - u"$ref": _validators.ref, - u"additionalItems": _validators.additionalItems, - u"additionalProperties": _validators.additionalProperties, - u"allOf": _validators.allOf, - u"anyOf": _validators.anyOf, - u"const": _validators.const, - u"contains": _validators.contains, - u"dependencies": _validators.dependencies, - u"enum": _validators.enum, - u"exclusiveMaximum": _validators.exclusiveMaximum, - u"exclusiveMinimum": _validators.exclusiveMinimum, - u"format": _validators.format, - u"if": _validators.if_, - u"items": _validators.items, - u"maxItems": _validators.maxItems, - u"maxLength": _validators.maxLength, - u"maxProperties": _validators.maxProperties, - u"maximum": _validators.maximum, - u"minItems": _validators.minItems, - u"minLength": _validators.minLength, - u"minProperties": _validators.minProperties, - u"minimum": _validators.minimum, - u"multipleOf": _validators.multipleOf, - u"oneOf": _validators.oneOf, - u"not": _validators.not_, - u"pattern": _validators.pattern, - u"patternProperties": _validators.patternProperties, - u"properties": _validators.properties, - u"propertyNames": _validators.propertyNames, - u"required": _validators.required, - u"type": _validators.type, - u"uniqueItems": _validators.uniqueItems, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "const": _validators.const, + "contains": _legacy_validators.contains_draft6_draft7, + "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "if": _validators.if_, + "items": _legacy_validators.items_draft6_draft7_draft201909, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": 
_validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft7_type_checker, version="draft7", + applicable_validators=_legacy_validators.ignore_ref_siblings, +) + +Draft201909Validator = create( + meta_schema=_utils.load_schema("draft2019-09"), + validators={ + "$recursiveRef": _legacy_validators.recursiveRef, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "const": _validators.const, + "contains": _validators.contains, + "dependentRequired": _validators.dependentRequired, + "dependentSchemas": _validators.dependentSchemas, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "if": _validators.if_, + "items": _legacy_validators.items_draft6_draft7_draft201909, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "unevaluatedItems": _validators.unevaluatedItems, + "unevaluatedProperties": _validators.unevaluatedProperties, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft201909_type_checker, + version="draft2019-09", +) + +Draft202012Validator = create( + meta_schema=_utils.load_schema("draft2020-12"), + validators={ + "$dynamicRef": _validators.dynamicRef, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "const": _validators.const, + "contains": _validators.contains, + "dependentRequired": _validators.dependentRequired, + "dependentSchemas": _validators.dependentSchemas, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "if": _validators.if_, + "items": _validators.items, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "prefixItems": _validators.prefixItems, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "unevaluatedItems": _validators.unevaluatedItems, + 
"unevaluatedProperties": _validators.unevaluatedProperties, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft202012_type_checker, + version="draft2020-12", ) -_LATEST_VERSION = Draft7Validator +_LATEST_VERSION = Draft202012Validator class RefResolver(object): @@ -653,10 +656,7 @@ def __init__( self.handlers = dict(handlers) self._scopes_stack = [base_uri] - self.store = _utils.URIDict( - (id, validator.META_SCHEMA) - for id, validator in iteritems(meta_schemas) - ) + self.store = _utils.URIDict(_store_schema_list()) self.store.update(store) self.store[base_uri] = referrer @@ -708,7 +708,7 @@ def pop_scope(self): raise exceptions.RefResolutionError( "Failed to pop the scope from an empty stack. " "`pop_scope()` should only be called once for every " - "`push_scope()`" + "`push_scope()`", ) @property @@ -731,6 +731,12 @@ def in_scope(self, scope): """ Temporarily enter the given scope for the duration of the context. """ + warnings.warn( + "jsonschema.RefResolver.in_scope is deprecated and will be " + "removed in a future release.", + DeprecationWarning, + stacklevel=3, + ) self.push_scope(scope) try: yield @@ -758,11 +764,44 @@ def resolving(self, ref): finally: self.pop_scope() + def _find_in_referrer(self, key): + return self._get_subschemas_cache()[key] + + @lru_cache() + def _get_subschemas_cache(self): + cache = {key: [] for key in _SUBSCHEMAS_KEYWORDS} + for keyword, subschema in _search_schema( + self.referrer, _match_subschema_keywords, + ): + cache[keyword].append(subschema) + return cache + + @lru_cache() + def _find_in_subschemas(self, url): + subschemas = self._get_subschemas_cache()["$id"] + if not subschemas: + return None + uri, fragment = urldefrag(url) + for subschema in subschemas: + target_uri = self._urljoin_cache( + self.resolution_scope, subschema["$id"], + ) + if target_uri.rstrip("/") == uri.rstrip("/"): + if fragment: + subschema = self.resolve_fragment(subschema, fragment) + return url, subschema + return None + def resolve(self, ref): """ Resolve the given reference. 
""" - url = self._urljoin_cache(self.resolution_scope, ref) + url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/") + + match = self._find_in_subschemas(url) + if match is not None: + return match + return url, self._remote_cache(url) def resolve_from_url(self, url): @@ -795,11 +834,31 @@ def resolve_fragment(self, document, fragment): a URI fragment to resolve within it """ - fragment = fragment.lstrip(u"/") - parts = unquote(fragment).split(u"/") if fragment else [] + fragment = fragment.lstrip("/") + + if not fragment: + return document + if document is self.referrer: + find = self._find_in_referrer + else: + + def find(key): + yield from _search_schema(document, _match_keyword(key)) + + for keyword in ["$anchor", "$dynamicAnchor"]: + for subschema in find(keyword): + if fragment == subschema[keyword]: + return subschema + for keyword in ["id", "$id"]: + for subschema in find(keyword): + if "#" + fragment == subschema[keyword]: + return subschema + + # Resolve via path + parts = unquote(fragment).split("/") if fragment else [] for part in parts: - part = part.replace(u"~1", u"/").replace(u"~0", u"~") + part = part.replace("~1", "/").replace("~0", "~") if isinstance(document, Sequence): # Array indexes should be turned into integers @@ -811,7 +870,7 @@ def resolve_fragment(self, document, fragment): document = document[part] except (TypeError, LookupError): raise exceptions.RefResolutionError( - "Unresolvable JSON pointer: %r" % fragment + f"Unresolvable JSON pointer: {fragment!r}", ) return document @@ -854,7 +913,7 @@ def resolve_remote(self, uri): if scheme in self.handlers: result = self.handlers[scheme](uri) - elif scheme in [u"http", u"https"] and requests: + elif scheme in ["http", "https"] and requests: # Requests has support for detecting the correct encoding of # json over http result = requests.get(uri).json() @@ -868,6 +927,35 @@ def resolve_remote(self, uri): return result +_SUBSCHEMAS_KEYWORDS = ("$id", "id", "$anchor", "$dynamicAnchor") + + +def _match_keyword(keyword): + + def matcher(value): + if keyword in value: + yield value + + return matcher + + +def _match_subschema_keywords(value): + for keyword in _SUBSCHEMAS_KEYWORDS: + if keyword in value: + yield keyword, value + + +def _search_schema(schema, matcher): + """Breadth-first search routine.""" + values = deque([schema]) + while values: + value = values.pop() + if not isinstance(value, dict): + continue + yield from matcher(value) + values.extendleft(value.values()) + + def validate(instance, schema, cls=None, *args, **kwargs): """ Validate an instance under the given schema. @@ -883,7 +971,7 @@ def validate(instance, schema, cls=None, *args, **kwargs): If you know you have a valid schema already, especially if you intend to validate multiple instances with the same schema, you - likely would prefer using the `IValidator.validate` method directly + likely would prefer using the `Validator.validate` method directly on a specific validator (e.g. ``Draft7Validator.validate``). @@ -897,7 +985,7 @@ def validate(instance, schema, cls=None, *args, **kwargs): The schema to validate with - cls (IValidator): + cls (Validator): The class that will be used to validate the instance. @@ -943,7 +1031,7 @@ def validator_for(schema, default=_LATEST_VERSION): Arguments: - schema (collections.Mapping or bool): + schema (collections.abc.Mapping or bool): the schema to look at @@ -955,9 +1043,9 @@ def validator_for(schema, default=_LATEST_VERSION): If unprovided, the default is to return the latest supported draft. 
""" - if schema is True or schema is False or u"$schema" not in schema: + if schema is True or schema is False or "$schema" not in schema: return default - if schema[u"$schema"] not in meta_schemas: + if schema["$schema"] not in _META_SCHEMAS: warn( ( "The metaschema specified by $schema was not found. " @@ -967,4 +1055,4 @@ def validator_for(schema, default=_LATEST_VERSION): DeprecationWarning, stacklevel=2, ) - return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION) + return _META_SCHEMAS.get(schema["$schema"], _LATEST_VERSION) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/__main__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/__main__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/_version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/_version.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/cli.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/cli.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/driver.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/driver.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/misc/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/misc/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/bases.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/bases.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/operations/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/bases.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/bases.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/stages.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/pointer/stages.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/bases.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/bases.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/providers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/providers.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/reference/util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/bases.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/bases.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/draft03.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/draft03.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/draft04.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/draft04.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/factorize.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/factorize.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/formats.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/formats.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/pointer_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/pointer_util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonspec/validators/util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/__init__.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/__main__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/__main__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/btm_matcher.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/btm_matcher.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/btm_utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/btm_utils.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixer_base.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixer_base.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixer_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixer_util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_apply.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_apply.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_asserts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_asserts.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_basestring.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_basestring.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_buffer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_buffer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_dict.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_dict.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_except.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_except.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_exec.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_exec.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_execfile.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_execfile.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_exitfunc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_exitfunc.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_filter.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_filter.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_funcattrs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_funcattrs.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_future.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_future.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_getcwdu.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_getcwdu.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_has_key.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_has_key.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_idioms.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_idioms.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_import.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_import.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_imports.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_imports.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_imports2.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_imports2.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_input.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_input.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_intern.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_intern.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_isinstance.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_isinstance.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_itertools.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_itertools.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_itertools_imports.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_itertools_imports.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_long.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_long.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_map.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_map.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_metaclass.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_metaclass.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_methodattrs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_methodattrs.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_ne.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_ne.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_next.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_next.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_nonzero.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_nonzero.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_numliterals.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_numliterals.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_operator.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_operator.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_paren.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_paren.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_print.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_print.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_raise.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_raise.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_raw_input.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_raw_input.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_reduce.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_reduce.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_reload.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_reload.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_renames.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_renames.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_repr.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_repr.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_set_literal.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_set_literal.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_standarderror.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_standarderror.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_sys_exc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_sys_exc.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_throw.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_throw.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_tuple_params.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_tuple_params.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_types.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_types.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_unicode.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_unicode.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_urllib.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_urllib.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_ws_comma.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_ws_comma.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_xrange.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_xrange.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_xreadlines.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_xreadlines.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_zip.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/fixes/fix_zip.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/main.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/main.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/patcomp.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/patcomp.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/__init__.py 
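The long runs of `old mode 100755` / `new mode 100644` stanzas above and below carry no content changes; they only clear the executable bit that these vendored sources were originally committed with. A hypothetical stdlib-only helper (not part of this add-on) for spotting such files before committing:

    import os
    import stat

    def executable_files(root):
        # Yield every file under root whose mode has any executable bit set,
        # i.e. a candidate for the 100755 -> 100644 normalization seen here.
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if os.stat(path).st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
                    yield path

    for path in executable_files("Splunk-TA-cisco-dnacenter/bin"):
        print(path)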
old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/conv.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/conv.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/driver.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/driver.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/grammar.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/grammar.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/literals.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/literals.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/parse.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/parse.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/pgen.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/pgen.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/tokenize.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pgen2/tokenize.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pygram.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pygram.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pytree.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/pytree.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/refactor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/refactor.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/__main__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/__main__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/bom.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/bom.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/crlf.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/crlf.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/bad_order.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/bad_order.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/__init__.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_explicit.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_explicit.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_first.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_first.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_last.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_last.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_parrot.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_parrot.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_preorder.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/myfixes/fix_preorder.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/no_fixer_cls.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/no_fixer_cls.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/parrot_example.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/fixers/parrot_example.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/infinite_recursion.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/infinite_recursion.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/py2_test_grammar.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/py2_test_grammar.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/py3_test_grammar.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/data/py3_test_grammar.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/support.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/support.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_all_fixers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_all_fixers.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_fixers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_fixers.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_main.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_main.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_parser.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_pytree.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_pytree.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_refactor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_refactor.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/lib2to3/tests/test_util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/__init__.py deleted file mode 100755 index 4cb1cbc..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty to make this a package diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixer_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixer_util.py deleted file mode 100755 index 48e4689..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixer_util.py +++ /dev/null @@ -1,520 +0,0 @@ -""" -Utility functions from 2to3, 3to2 and python-modernize (and some home-grown -ones). - -Licences: -2to3: PSF License v2 -3to2: Apache Software License (from 3to2/setup.py) -python-modernize licence: BSD (from python-modernize/LICENSE) -""" - -from lib2to3.fixer_util import (FromImport, Newline, is_import, - find_root, does_tree_import, Comma) -from lib2to3.pytree import Leaf, Node -from lib2to3.pygram import python_symbols as syms, python_grammar -from lib2to3.pygram import token -from lib2to3.fixer_util import (Node, Call, Name, syms, Comma, Number) -import re - - -def canonical_fix_name(fix, avail_fixes): - """ - Examples: - >>> canonical_fix_name('fix_wrap_text_literals') - 'libfuturize.fixes.fix_wrap_text_literals' - >>> canonical_fix_name('wrap_text_literals') - 'libfuturize.fixes.fix_wrap_text_literals' - >>> canonical_fix_name('wrap_te') - ValueError("unknown fixer name") - >>> canonical_fix_name('wrap') - ValueError("ambiguous fixer name") - """ - if ".fix_" in fix: - return fix - else: - if fix.startswith('fix_'): - fix = fix[4:] - # Infer the full module name for the fixer. - # First ensure that no names clash (e.g. - # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah): - found = [f for f in avail_fixes - if f.endswith('fix_{0}'.format(fix))] - if len(found) > 1: - raise ValueError("Ambiguous fixer name. Choose a fully qualified " - "module name instead from these:\n" + - "\n".join(" " + myf for myf in found)) - elif len(found) == 0: - raise ValueError("Unknown fixer. 
Use --list-fixes or -l for a list.") - return found[0] - - - -## These functions are from 3to2 by Joe Amenta: - -def Star(prefix=None): - return Leaf(token.STAR, u'*', prefix=prefix) - -def DoubleStar(prefix=None): - return Leaf(token.DOUBLESTAR, u'**', prefix=prefix) - -def Minus(prefix=None): - return Leaf(token.MINUS, u'-', prefix=prefix) - -def commatize(leafs): - """ - Accepts/turns: (Name, Name, ..., Name, Name) - Returns/into: (Name, Comma, Name, Comma, ..., Name, Comma, Name) - """ - new_leafs = [] - for leaf in leafs: - new_leafs.append(leaf) - new_leafs.append(Comma()) - del new_leafs[-1] - return new_leafs - -def indentation(node): - """ - Returns the indentation for this node - Iff a node is in a suite, then it has indentation. - """ - while node.parent is not None and node.parent.type != syms.suite: - node = node.parent - if node.parent is None: - return u"" - # The first three children of a suite are NEWLINE, INDENT, (some other node) - # INDENT.value contains the indentation for this suite - # anything after (some other node) has the indentation as its prefix. - if node.type == token.INDENT: - return node.value - elif node.prev_sibling is not None and node.prev_sibling.type == token.INDENT: - return node.prev_sibling.value - elif node.prev_sibling is None: - return u"" - else: - return node.prefix - -def indentation_step(node): - """ - Dirty little trick to get the difference between each indentation level - Implemented by finding the shortest indentation string - (technically, the "least" of all of the indentation strings, but - tabs and spaces mixed won't get this far, so those are synonymous.) - """ - r = find_root(node) - # Collect all indentations into one set. - all_indents = set(i.value for i in r.pre_order() if i.type == token.INDENT) - if not all_indents: - # nothing is indented anywhere, so we get to pick what we want - return u" " # four spaces is a popular convention - else: - return min(all_indents) - -def suitify(parent): - """ - Turn the stuff after the first colon in parent's children - into a suite, if it wasn't already - """ - for node in parent.children: - if node.type == syms.suite: - # already in the prefered format, do nothing - return - - # One-liners have no suite node, we have to fake one up - for i, node in enumerate(parent.children): - if node.type == token.COLON: - break - else: - raise ValueError(u"No class suite and no ':'!") - # Move everything into a suite node - suite = Node(syms.suite, [Newline(), Leaf(token.INDENT, indentation(node) + indentation_step(node))]) - one_node = parent.children[i+1] - one_node.remove() - one_node.prefix = u'' - suite.append_child(one_node) - parent.append_child(suite) - -def NameImport(package, as_name=None, prefix=None): - """ - Accepts a package (Name node), name to import it as (string), and - optional prefix and returns a node: - import [as ] - """ - if prefix is None: - prefix = u"" - children = [Name(u"import", prefix=prefix), package] - if as_name is not None: - children.extend([Name(u"as", prefix=u" "), - Name(as_name, prefix=u" ")]) - return Node(syms.import_name, children) - -_compound_stmts = (syms.if_stmt, syms.while_stmt, syms.for_stmt, syms.try_stmt, syms.with_stmt) -_import_stmts = (syms.import_name, syms.import_from) - -def import_binding_scope(node): - """ - Generator yields all nodes for which a node (an import_stmt) has scope - The purpose of this is for a call to _find() on each of them - """ - # import_name / import_from are small_stmts - assert node.type in _import_stmts - test = 
node.next_sibling - # A small_stmt can only be followed by a SEMI or a NEWLINE. - while test.type == token.SEMI: - nxt = test.next_sibling - # A SEMI can only be followed by a small_stmt or a NEWLINE - if nxt.type == token.NEWLINE: - break - else: - yield nxt - # A small_stmt can only be followed by either a SEMI or a NEWLINE - test = nxt.next_sibling - # Covered all subsequent small_stmts after the import_stmt - # Now to cover all subsequent stmts after the parent simple_stmt - parent = node.parent - assert parent.type == syms.simple_stmt - test = parent.next_sibling - while test is not None: - # Yes, this will yield NEWLINE and DEDENT. Deal with it. - yield test - test = test.next_sibling - - context = parent.parent - # Recursively yield nodes following imports inside of a if/while/for/try/with statement - if context.type in _compound_stmts: - # import is in a one-liner - c = context - while c.next_sibling is not None: - yield c.next_sibling - c = c.next_sibling - context = context.parent - - # Can't chain one-liners on one line, so that takes care of that. - - p = context.parent - if p is None: - return - - # in a multi-line suite - - while p.type in _compound_stmts: - - if context.type == syms.suite: - yield context - - context = context.next_sibling - - if context is None: - context = p.parent - p = context.parent - if p is None: - break - -def ImportAsName(name, as_name, prefix=None): - new_name = Name(name) - new_as = Name(u"as", prefix=u" ") - new_as_name = Name(as_name, prefix=u" ") - new_node = Node(syms.import_as_name, [new_name, new_as, new_as_name]) - if prefix is not None: - new_node.prefix = prefix - return new_node - - -def is_docstring(node): - """ - Returns True if the node appears to be a docstring - """ - return (node.type == syms.simple_stmt and - len(node.children) > 0 and node.children[0].type == token.STRING) - - -def future_import(feature, node): - """ - This seems to work - """ - root = find_root(node) - - if does_tree_import(u"__future__", feature, node): - return - - # Look for a shebang or encoding line - shebang_encoding_idx = None - - for idx, node in enumerate(root.children): - # Is it a shebang or encoding line? - if is_shebang_comment(node) or is_encoding_comment(node): - shebang_encoding_idx = idx - if is_docstring(node): - # skip over docstring - continue - names = check_future_import(node) - if not names: - # not a future statement; need to insert before this - break - if feature in names: - # already imported - return - - import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")]) - if shebang_encoding_idx == 0 and idx == 0: - # If this __future__ import would go on the first line, - # detach the shebang / encoding prefix from the current first line. - # and attach it to our new __future__ import node. - import_.prefix = root.children[0].prefix - root.children[0].prefix = u'' - # End the __future__ import line with a newline and add a blank line - # afterwards: - children = [import_ , Newline()] - root.insert_child(idx, Node(syms.simple_stmt, children)) - - -def future_import2(feature, node): - """ - An alternative to future_import() which might not work ... 
- """ - root = find_root(node) - - if does_tree_import(u"__future__", feature, node): - return - - insert_pos = 0 - for idx, node in enumerate(root.children): - if node.type == syms.simple_stmt and node.children and \ - node.children[0].type == token.STRING: - insert_pos = idx + 1 - break - - for thing_after in root.children[insert_pos:]: - if thing_after.type == token.NEWLINE: - insert_pos += 1 - continue - - prefix = thing_after.prefix - thing_after.prefix = u"" - break - else: - prefix = u"" - - import_ = FromImport(u"__future__", [Leaf(token.NAME, feature, prefix=u" ")]) - - children = [import_, Newline()] - root.insert_child(insert_pos, Node(syms.simple_stmt, children, prefix=prefix)) - -def parse_args(arglist, scheme): - u""" - Parse a list of arguments into a dict - """ - arglist = [i for i in arglist if i.type != token.COMMA] - - ret_mapping = dict([(k, None) for k in scheme]) - - for i, arg in enumerate(arglist): - if arg.type == syms.argument and arg.children[1].type == token.EQUAL: - # argument < NAME '=' any > - slot = arg.children[0].value - ret_mapping[slot] = arg.children[2] - else: - slot = scheme[i] - ret_mapping[slot] = arg - - return ret_mapping - - -# def is_import_from(node): -# """Returns true if the node is a statement "from ... import ..." -# """ -# return node.type == syms.import_from - - -def is_import_stmt(node): - return (node.type == syms.simple_stmt and node.children and - is_import(node.children[0])) - - -def touch_import_top(package, name_to_import, node): - """Works like `does_tree_import` but adds an import statement at the - top if it was not imported (but below any __future__ imports) and below any - comments such as shebang lines). - - Based on lib2to3.fixer_util.touch_import() - - Calling this multiple times adds the imports in reverse order. - - Also adds "standard_library.install_aliases()" after "from future import - standard_library". This should probably be factored into another function. - """ - - root = find_root(node) - - if does_tree_import(package, name_to_import, root): - return - - # Ideally, we would look for whether futurize --all-imports has been run, - # as indicated by the presence of ``from builtins import (ascii, ..., - # zip)`` -- and, if it has, we wouldn't import the name again. - - # Look for __future__ imports and insert below them - found = False - for name in ['absolute_import', 'division', 'print_function', - 'unicode_literals']: - if does_tree_import('__future__', name, root): - found = True - break - if found: - # At least one __future__ import. We want to loop until we've seen them - # all. - start, end = None, None - for idx, node in enumerate(root.children): - if check_future_import(node): - start = idx - # Start looping - idx2 = start - while node: - node = node.next_sibling - idx2 += 1 - if not check_future_import(node): - end = idx2 - break - break - assert start is not None - assert end is not None - insert_pos = end - else: - # No __future__ imports. - # We look for a docstring and insert the new node below that. If no docstring - # exists, just insert the node at the top. - for idx, node in enumerate(root.children): - if node.type != syms.simple_stmt: - break - if not is_docstring(node): - # This is the usual case. 
- break - insert_pos = idx - - if package is None: - import_ = Node(syms.import_name, [ - Leaf(token.NAME, u"import"), - Leaf(token.NAME, name_to_import, prefix=u" ") - ]) - else: - import_ = FromImport(package, [Leaf(token.NAME, name_to_import, prefix=u" ")]) - if name_to_import == u'standard_library': - # Add: - # standard_library.install_aliases() - # after: - # from future import standard_library - install_hooks = Node(syms.simple_stmt, - [Node(syms.power, - [Leaf(token.NAME, u'standard_library'), - Node(syms.trailer, [Leaf(token.DOT, u'.'), - Leaf(token.NAME, u'install_aliases')]), - Node(syms.trailer, [Leaf(token.LPAR, u'('), - Leaf(token.RPAR, u')')]) - ]) - ] - ) - children_hooks = [install_hooks, Newline()] - else: - children_hooks = [] - - # FromImport(package, [Leaf(token.NAME, name_to_import, prefix=u" ")]) - - children_import = [import_, Newline()] - old_prefix = root.children[insert_pos].prefix - root.children[insert_pos].prefix = u'' - root.insert_child(insert_pos, Node(syms.simple_stmt, children_import, prefix=old_prefix)) - if len(children_hooks) > 0: - root.insert_child(insert_pos + 1, Node(syms.simple_stmt, children_hooks)) - - -## The following functions are from python-modernize by Armin Ronacher: -# (a little edited). - -def check_future_import(node): - """If this is a future import, return set of symbols that are imported, - else return None.""" - # node should be the import statement here - savenode = node - if not (node.type == syms.simple_stmt and node.children): - return set() - node = node.children[0] - # now node is the import_from node - if not (node.type == syms.import_from and - # node.type == token.NAME and # seems to break it - hasattr(node.children[1], 'value') and - node.children[1].value == u'__future__'): - return set() - if node.children[3].type == token.LPAR: - node = node.children[4] - else: - node = node.children[3] - # now node is the import_as_name[s] - # print(python_grammar.number2symbol[node.type]) # breaks sometimes - if node.type == syms.import_as_names: - result = set() - for n in node.children: - if n.type == token.NAME: - result.add(n.value) - elif n.type == syms.import_as_name: - n = n.children[0] - assert n.type == token.NAME - result.add(n.value) - return result - elif node.type == syms.import_as_name: - node = node.children[0] - assert node.type == token.NAME - return set([node.value]) - elif node.type == token.NAME: - return set([node.value]) - else: - # TODO: handle brackets like this: - # from __future__ import (absolute_import, division) - assert False, "strange import: %s" % savenode - - -SHEBANG_REGEX = r'^#!.*python' -ENCODING_REGEX = r"^#.*coding[:=]\s*([-\w.]+)" - - -def is_shebang_comment(node): - """ - Comments are prefixes for Leaf nodes. Returns whether the given node has a - prefix that looks like a shebang line or an encoding line: - - #!/usr/bin/env python - #!/usr/bin/python3 - """ - return bool(re.match(SHEBANG_REGEX, node.prefix)) - - -def is_encoding_comment(node): - """ - Comments are prefixes for Leaf nodes. 
Returns whether the given node has a - prefix that looks like an encoding line: - - # coding: utf-8 - # encoding: utf-8 - # -*- coding: -*- - # vim: set fileencoding= : - """ - return bool(re.match(ENCODING_REGEX, node.prefix)) - - -def wrap_in_fn_call(fn_name, args, prefix=None): - """ - Example: - >>> wrap_in_fn_call("oldstr", (arg,)) - oldstr(arg) - - >>> wrap_in_fn_call("olddiv", (arg1, arg2)) - olddiv(arg1, arg2) - - >>> wrap_in_fn_call("olddiv", [arg1, comma, arg2, comma, arg3]) - olddiv(arg1, arg2, arg3) - """ - assert len(args) > 0 - if len(args) == 2: - expr1, expr2 = args - newargs = [expr1, Comma(), expr2] - else: - newargs = args - return Call(Name(fn_name), newargs, prefix=prefix) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/__init__.py deleted file mode 100755 index 0b56250..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -import sys -from lib2to3 import refactor - -# The following fixers are "safe": they convert Python 2 code to more -# modern Python 2 code. They should be uncontroversial to apply to most -# projects that are happy to drop support for Py2.5 and below. Applying -# them first will reduce the size of the patch set for the real porting. -lib2to3_fix_names_stage1 = set([ - 'lib2to3.fixes.fix_apply', - 'lib2to3.fixes.fix_except', - 'lib2to3.fixes.fix_exec', - 'lib2to3.fixes.fix_exitfunc', - 'lib2to3.fixes.fix_funcattrs', - 'lib2to3.fixes.fix_has_key', - 'lib2to3.fixes.fix_idioms', - # 'lib2to3.fixes.fix_import', # makes any implicit relative imports explicit. (Use with ``from __future__ import absolute_import) - 'lib2to3.fixes.fix_intern', - 'lib2to3.fixes.fix_isinstance', - 'lib2to3.fixes.fix_methodattrs', - 'lib2to3.fixes.fix_ne', - # 'lib2to3.fixes.fix_next', # would replace ``next`` method names - # with ``__next__``. - 'lib2to3.fixes.fix_numliterals', # turns 1L into 1, 0755 into 0o755 - 'lib2to3.fixes.fix_paren', - # 'lib2to3.fixes.fix_print', # see the libfuturize fixer that also - # adds ``from __future__ import print_function`` - # 'lib2to3.fixes.fix_raise', # uses incompatible with_traceback() method on exceptions - 'lib2to3.fixes.fix_reduce', # reduce is available in functools on Py2.6/Py2.7 - 'lib2to3.fixes.fix_renames', # sys.maxint -> sys.maxsize - # 'lib2to3.fixes.fix_set_literal', # this is unnecessary and breaks Py2.6 support - 'lib2to3.fixes.fix_repr', - 'lib2to3.fixes.fix_standarderror', - 'lib2to3.fixes.fix_sys_exc', - 'lib2to3.fixes.fix_throw', - 'lib2to3.fixes.fix_tuple_params', - 'lib2to3.fixes.fix_types', - 'lib2to3.fixes.fix_ws_comma', # can perhaps decrease readability: see issue #58 - 'lib2to3.fixes.fix_xreadlines', -]) - -# The following fixers add a dependency on the ``future`` package on order to -# support Python 2: -lib2to3_fix_names_stage2 = set([ - # 'lib2to3.fixes.fix_buffer', # perhaps not safe. Test this. - # 'lib2to3.fixes.fix_callable', # not needed in Py3.2+ - 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc. and move to stage2 - # 'lib2to3.fixes.fix_execfile', # some problems: see issue #37. 
- # We use a custom fixer instead (see below) - # 'lib2to3.fixes.fix_future', # we don't want to remove __future__ imports - 'lib2to3.fixes.fix_getcwdu', - # 'lib2to3.fixes.fix_imports', # called by libfuturize.fixes.fix_future_standard_library - # 'lib2to3.fixes.fix_imports2', # we don't handle this yet (dbm) - # 'lib2to3.fixes.fix_input', # Called conditionally by libfuturize.fixes.fix_input - 'lib2to3.fixes.fix_itertools', - 'lib2to3.fixes.fix_itertools_imports', - 'lib2to3.fixes.fix_filter', - 'lib2to3.fixes.fix_long', - 'lib2to3.fixes.fix_map', - # 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead - 'lib2to3.fixes.fix_next', - 'lib2to3.fixes.fix_nonzero', # TODO: cause this to import ``object`` and/or add a decorator for mapping __bool__ to __nonzero__ - 'lib2to3.fixes.fix_operator', # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3 - 'lib2to3.fixes.fix_raw_input', - # 'lib2to3.fixes.fix_unicode', # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings - # 'lib2to3.fixes.fix_urllib', # included in libfuturize.fix_future_standard_library_urllib - # 'lib2to3.fixes.fix_xrange', # custom one because of a bug with Py3.3's lib2to3 - 'lib2to3.fixes.fix_zip', -]) - -libfuturize_fix_names_stage1 = set([ - 'libfuturize.fixes.fix_absolute_import', - 'libfuturize.fixes.fix_next_call', # obj.next() -> next(obj). Unlike - # lib2to3.fixes.fix_next, doesn't change - # the ``next`` method to ``__next__``. - 'libfuturize.fixes.fix_print_with_import', - 'libfuturize.fixes.fix_raise', - # 'libfuturize.fixes.fix_order___future__imports', # TODO: consolidate to a single line to simplify testing -]) - -libfuturize_fix_names_stage2 = set([ - 'libfuturize.fixes.fix_basestring', - # 'libfuturize.fixes.fix_add__future__imports_except_unicode_literals', # just in case - 'libfuturize.fixes.fix_cmp', - 'libfuturize.fixes.fix_division_safe', - 'libfuturize.fixes.fix_execfile', - 'libfuturize.fixes.fix_future_builtins', - 'libfuturize.fixes.fix_future_standard_library', - 'libfuturize.fixes.fix_future_standard_library_urllib', - 'libfuturize.fixes.fix_input', - 'libfuturize.fixes.fix_metaclass', - 'libpasteurize.fixes.fix_newstyle', - 'libfuturize.fixes.fix_object', - # 'libfuturize.fixes.fix_order___future__imports', # TODO: consolidate to a single line to simplify testing - 'libfuturize.fixes.fix_unicode_keep_u', - # 'libfuturize.fixes.fix_unicode_literals_import', - 'libfuturize.fixes.fix_xrange_with_import', # custom one because of a bug with Py3.3's lib2to3 -]) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_UserDict.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_UserDict.py deleted file mode 100755 index cb0cfac..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_UserDict.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Fix UserDict. - -Incomplete! - -TODO: base this on fix_urllib perhaps? 
-""" - - -# Local imports -from lib2to3 import fixer_base -from lib2to3.fixer_util import Name, attr_chain -from lib2to3.fixes.fix_imports import alternates, build_pattern, FixImports - -MAPPING = {'UserDict': 'collections', -} - -# def alternates(members): -# return "(" + "|".join(map(repr, members)) + ")" -# -# -# def build_pattern(mapping=MAPPING): -# mod_list = ' | '.join(["module_name='%s'" % key for key in mapping]) -# bare_names = alternates(mapping.keys()) -# -# yield """name_import=import_name< 'import' ((%s) | -# multiple_imports=dotted_as_names< any* (%s) any* >) > -# """ % (mod_list, mod_list) -# yield """import_from< 'from' (%s) 'import' ['('] -# ( any | import_as_name< any 'as' any > | -# import_as_names< any* >) [')'] > -# """ % mod_list -# yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > | -# multiple_imports=dotted_as_names< -# any* dotted_as_name< (%s) 'as' any > any* >) > -# """ % (mod_list, mod_list) -# -# # Find usages of module members in code e.g. thread.foo(bar) -# yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names - - -# class FixUserDict(fixer_base.BaseFix): -class FixUserdict(FixImports): - - BM_compatible = True - keep_line_order = True - # This is overridden in fix_imports2. - mapping = MAPPING - - # We want to run this fixer late, so fix_import doesn't try to make stdlib - # renames into relative imports. - run_order = 6 - - def build_pattern(self): - return "|".join(build_pattern(self.mapping)) - - def compile_pattern(self): - # We override this, so MAPPING can be pragmatically altered and the - # changes will be reflected in PATTERN. - self.PATTERN = self.build_pattern() - super(FixImports, self).compile_pattern() - - # Don't match the node if it's within another match. - def match(self, node): - match = super(FixImports, self).match - results = match(node) - if results: - # Module usage could be in the trailer of an attribute lookup, so we - # might have nested matches when "bare_with_attr" is present. - if "bare_with_attr" not in results and \ - any(match(obj) for obj in attr_chain(node, "parent")): - return False - return results - return False - - def start_tree(self, tree, filename): - super(FixImports, self).start_tree(tree, filename) - self.replace = {} - - def transform(self, node, results): - import_mod = results.get("module_name") - if import_mod: - mod_name = import_mod.value - new_name = unicode(self.mapping[mod_name]) - import_mod.replace(Name(new_name, prefix=import_mod.prefix)) - if "name_import" in results: - # If it's not a "from x import x, y" or "import x as y" import, - # marked its usage to be replaced. - self.replace[mod_name] = new_name - if "multiple_imports" in results: - # This is a nasty hack to fix multiple imports on a line (e.g., - # "import StringIO, urlparse"). The problem is that I can't - # figure out an easy way to make a pattern recognize the keys of - # MAPPING randomly sprinkled in an import statement. - results = self.match(node) - if results: - self.transform(node, results) - else: - # Replace usage of the module. 
- bare_name = results["bare_with_attr"][0] - new_name = self.replace.get(bare_name.value) - if new_name: - bare_name.replace(Name(new_name, prefix=bare_name.prefix)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_absolute_import.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_absolute_import.py deleted file mode 100755 index eab9c52..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_absolute_import.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Fixer for import statements, with a __future__ import line. - -Based on lib2to3/fixes/fix_import.py, but extended slightly so it also -supports Cython modules. - -If spam is being imported from the local directory, this import: - from spam import eggs -becomes: - from __future__ import absolute_import - from .spam import eggs - -and this import: - import spam -becomes: - from __future__ import absolute_import - from . import spam -""" - -from os.path import dirname, join, exists, sep -from lib2to3.fixes.fix_import import FixImport -from lib2to3.fixer_util import FromImport, syms -from lib2to3.fixes.fix_import import traverse_imports - -from libfuturize.fixer_util import future_import - - -class FixAbsoluteImport(FixImport): - run_order = 9 - - def transform(self, node, results): - """ - Copied from FixImport.transform(), but with this line added in - any modules that had implicit relative imports changed: - - from __future__ import absolute_import" - """ - if self.skip: - return - imp = results['imp'] - - if node.type == syms.import_from: - # Some imps are top-level (eg: 'import ham') - # some are first level (eg: 'import ham.eggs') - # some are third level (eg: 'import ham.eggs as spam') - # Hence, the loop - while not hasattr(imp, 'value'): - imp = imp.children[0] - if self.probably_a_local_import(imp.value): - imp.value = u"." + imp.value - imp.changed() - future_import(u"absolute_import", node) - else: - have_local = False - have_absolute = False - for mod_name in traverse_imports(imp): - if self.probably_a_local_import(mod_name): - have_local = True - else: - have_absolute = True - if have_absolute: - if have_local: - # We won't handle both sibling and absolute imports in the - # same statement at the moment. - self.warning(node, "absolute and local imports together") - return - - new = FromImport(u".", [imp]) - new.prefix = node.prefix - future_import(u"absolute_import", node) - return new - - def probably_a_local_import(self, imp_name): - """ - Like the corresponding method in the base class, but this also - supports Cython modules. - """ - if imp_name.startswith(u"."): - # Relative imports are certainly not local imports. - return False - imp_name = imp_name.split(u".", 1)[0] - base_path = dirname(self.filename) - base_path = join(base_path, imp_name) - # If there is no __init__.py next to the file its not in a package - # so can't be a relative import. 
- if not exists(join(dirname(base_path), "__init__.py")): - return False - for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd", ".pyx"]: - if exists(base_path + ext): - return True - return False diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py deleted file mode 100755 index 37d7fee..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -Fixer for adding: - - from __future__ import absolute_import - from __future__ import division - from __future__ import print_function - -This is "stage 1": hopefully uncontroversial changes. - -Stage 2 adds ``unicode_literals``. -""" - -from lib2to3 import fixer_base -from libfuturize.fixer_util import future_import - -class FixAddFutureImportsExceptUnicodeLiterals(fixer_base.BaseFix): - BM_compatible = True - PATTERN = "file_input" - - run_order = 9 - - def transform(self, node, results): - # Reverse order: - future_import(u"absolute_import", node) - future_import(u"division", node) - future_import(u"print_function", node) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_basestring.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_basestring.py deleted file mode 100755 index 5676d08..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_basestring.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Fixer that adds ``from past.builtins import basestring`` if there is a -reference to ``basestring`` -""" - -from lib2to3 import fixer_base - -from libfuturize.fixer_util import touch_import_top - - -class FixBasestring(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = "'basestring'" - - def transform(self, node, results): - touch_import_top(u'past.builtins', 'basestring', node) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_bytes.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_bytes.py deleted file mode 100755 index 4202122..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_bytes.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Optional fixer that changes all unprefixed string literals "..." to b"...". - -br'abcd' is a SyntaxError on Python 2 but valid on Python 3. -ur'abcd' is a SyntaxError on Python 3 but valid on Python 2. 
-
-"""
-from __future__ import unicode_literals
-
-import re
-from lib2to3.pgen2 import token
-from lib2to3 import fixer_base
-
-_literal_re = re.compile(r"[^bBuUrR]?[\'\"]")
-
-class FixBytes(fixer_base.BaseFix):
-    BM_compatible = True
-    PATTERN = "STRING"
-
-    def transform(self, node, results):
-        if node.type == token.STRING:
-            if _literal_re.match(node.value):
-                new = node.clone()
-                new.value = u'b' + new.value
-                return new
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_cmp.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_cmp.py
deleted file mode 100755
index 762eb4b..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_cmp.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# coding: utf-8
-"""
-Fixer for the cmp() function on Py2, which was removed in Py3.
-
-Adds this import line::
-
-    from past.builtins import cmp
-
-if cmp() is called in the code.
-"""
-
-from __future__ import unicode_literals
-from lib2to3 import fixer_base
-
-from libfuturize.fixer_util import touch_import_top
-
-
-expression = "name='cmp'"
-
-
-class FixCmp(fixer_base.BaseFix):
-    BM_compatible = True
-    run_order = 9
-
-    PATTERN = """
-              power<
-                 ({0}) trailer< '(' args=[any] ')' >
-              rest=any* >
-              """.format(expression)
-
-    def transform(self, node, results):
-        name = results["name"]
-        touch_import_top(u'past.builtins', name.value, node)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division.py
deleted file mode 100755
index 6975a52..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-UNFINISHED
-For the ``future`` package.
-
-Adds this import line:
-
-    from __future__ import division
-
-at the top so the code runs identically on Py3 and Py2.6/2.7
-"""
-
-from libpasteurize.fixes.fix_division import FixDivision
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division_safe.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division_safe.py
deleted file mode 100755
index 3d5909c..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_division_safe.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""
-For the ``future`` package.
-
-Adds this import line:
-
-    from __future__ import division
-
-at the top and changes any old-style divisions to be calls to
-past.utils.old_div so the code runs as before on Py2.6/2.7 and has the same
-behaviour on Py3.
-
-If "from __future__ import division" is already in effect, this fixer does
-nothing.
-"""
-
-import re
-from lib2to3.fixer_util import Leaf, Node, Comma
-from lib2to3 import fixer_base
-from libfuturize.fixer_util import (token, future_import, touch_import_top,
-                                    wrap_in_fn_call)
-
-
-def match_division(node):
-    u"""
-    __future__.division redefines the meaning of a single slash for division,
-    so we match that and only that.
-    """
-    slash = token.SLASH
-    return node.type == slash and not node.next_sibling.type == slash and \
-        not node.prev_sibling.type == slash
-
-const_re = re.compile('^[0-9]*[.][0-9]*$')
-
-def is_floaty(node):
-    return _is_floaty(node.prev_sibling) or _is_floaty(node.next_sibling)
-
-
-def _is_floaty(expr):
-    if isinstance(expr, list):
-        expr = expr[0]
-
-    if isinstance(expr, Leaf):
-        # If it's a leaf, let's see if it's a numeric constant containing a '.'
-        return const_re.match(expr.value)
-    elif isinstance(expr, Node):
-        # If the expression is a node, let's see if it's a direct cast to float
-        if isinstance(expr.children[0], Leaf):
-            return expr.children[0].value == u'float'
-    return False
-
-
-class FixDivisionSafe(fixer_base.BaseFix):
-    # BM_compatible = True
-    run_order = 4    # this seems to be ignored?
-
-    _accept_type = token.SLASH
-
-    PATTERN = """
-    term<(not('/') any)+ '/' ((not('/') any))>
-    """
-
-    def start_tree(self, tree, name):
-        """
-        Skip this fixer if "__future__.division" is already imported.
-        """
-        super(FixDivisionSafe, self).start_tree(tree, name)
-        self.skip = "division" in tree.future_features
-
-    def match(self, node):
-        u"""
-        Since the tree needs to be fixed once and only once if and only if it
-        matches, we can start discarding matches after the first.
-        """
-        if node.type == self.syms.term:
-            matched = False
-            skip = False
-            children = []
-            for child in node.children:
-                if skip:
-                    skip = False
-                    continue
-                if match_division(child) and not is_floaty(child):
-                    matched = True
-
-                    # Strip any leading space for the first number:
-                    children[0].prefix = u''
-
-                    children = [wrap_in_fn_call("old_div",
-                                                children + [Comma(), child.next_sibling.clone()],
-                                                prefix=node.prefix)]
-                    skip = True
-                else:
-                    children.append(child.clone())
-            if matched:
-                return Node(node.type, children, fixers_applied=node.fixers_applied)
-
-        return False
-
-    def transform(self, node, results):
-        if self.skip:
-            return
-        future_import(u"division", node)
-        touch_import_top(u'past.utils', u'old_div', node)
-        return results
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_execfile.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_execfile.py
deleted file mode 100755
index cfe9d8d..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_execfile.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# coding: utf-8
-"""
-Fixer for the execfile() function on Py2, which was removed in Py3.
-
-The Lib/lib2to3/fixes/fix_execfile.py module has some problems: see
-python-future issue #37. This fixer merely imports execfile() from
-past.builtins and leaves the code alone.
-
-Adds this import line::
-
-    from past.builtins import execfile
-
-for the function execfile() that was removed from Py3.
-"""
-
-from __future__ import unicode_literals
-from lib2to3 import fixer_base
-
-from libfuturize.fixer_util import touch_import_top
-
-
-expression = "name='execfile'"
-
-
-class FixExecfile(fixer_base.BaseFix):
-    BM_compatible = True
-    run_order = 9
-
-    PATTERN = """
-              power<
-                 ({0}) trailer< '(' args=[any] ')' >
-              rest=any* >
-              """.format(expression)
-
-    def transform(self, node, results):
-        name = results["name"]
-        touch_import_top(u'past.builtins', name.value, node)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_builtins.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_builtins.py
deleted file mode 100755
index eea6c6a..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_builtins.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-For the ``future`` package.
-
-Adds this import line::
-
-    from builtins import XYZ
-
-for each of the functions XYZ that is used in the module.
-
-Adds these imports after any other imports (in an initial block of them).
-"""
-
-from __future__ import unicode_literals
-
-from lib2to3 import fixer_base
-from lib2to3.pygram import python_symbols as syms
-from lib2to3.fixer_util import Name, Call, in_special_context
-
-from libfuturize.fixer_util import touch_import_top
-
-# All builtins are:
-#     from future.builtins.iterators import (filter, map, zip)
-#     from future.builtins.misc import (ascii, chr, hex, input, isinstance, oct, open, round, super)
-#     from future.types import (bytes, dict, int, range, str)
-# We don't need isinstance any more.
-
-replaced_builtin_fns = '''filter map zip
-                          ascii chr hex input next oct
-                          bytes range str raw_input'''.split()
-                          # This includes raw_input as a workaround for the
-                          # lib2to3 fixer for raw_input on Py3 (only), allowing
-                          # the correct import to be included. (Py3 seems to run
-                          # the fixers the wrong way around, perhaps ignoring the
-                          # run_order class attribute below ...)
-
-expression = '|'.join(["name='{0}'".format(name) for name in replaced_builtin_fns])
-
-
-class FixFutureBuiltins(fixer_base.BaseFix):
-    BM_compatible = True
-    run_order = 7
-
-    # Currently we only match uses as a function. This doesn't match e.g.:
-    #     if isinstance(s, str):
-    #         ...
-    PATTERN = """
-              power<
-                 ({0}) trailer< '(' [arglist=any] ')' >
-              rest=any* >
-              |
-              power<
-                  'map' trailer< '(' [arglist=any] ')' >
-              >
-              """.format(expression)
-
-    def transform(self, node, results):
-        name = results["name"]
-        touch_import_top(u'builtins', name.value, node)
-        # name.replace(Name(u"input", prefix=name.prefix))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library.py
deleted file mode 100755
index a1c3f3d..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-For the ``future`` package.
-
-Changes any imports needed to reflect the standard library reorganization. Also
-Also adds these import lines:
-
-    from future import standard_library
-    standard_library.install_aliases()
-
-after any __future__ imports but before any other imports.
-"""
-
-from lib2to3.fixes.fix_imports import FixImports
-from libfuturize.fixer_util import touch_import_top
-
-
-class FixFutureStandardLibrary(FixImports):
-    run_order = 8
-
-    def transform(self, node, results):
-        result = super(FixFutureStandardLibrary, self).transform(node, results)
-        # TODO: add a blank line between any __future__ imports and this?
-        touch_import_top(u'future', u'standard_library', node)
-        return result
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library_urllib.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library_urllib.py
deleted file mode 100755
index cf67388..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_future_standard_library_urllib.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-For the ``future`` package.
-
-A special fixer that ensures that these lines have been added::
-
-    from future import standard_library
-    standard_library.install_hooks()
-
-even if the only module imported was ``urllib``, in which case the regular fixer
-wouldn't have added these lines.
-
-"""
-
-from lib2to3.fixes.fix_urllib import FixUrllib
-from libfuturize.fixer_util import touch_import_top, find_root
-
-
-class FixFutureStandardLibraryUrllib(FixUrllib):    # not a subclass of FixImports
-    run_order = 8
-
-    def transform(self, node, results):
-        # transform_member() in lib2to3/fixes/fix_urllib.py breaks node so find_root(node)
-        # no longer works after the super() call below. So we find the root first:
-        root = find_root(node)
-        result = super(FixFutureStandardLibraryUrllib, self).transform(node, results)
-        # TODO: add a blank line between any __future__ imports and this?
-        touch_import_top(u'future', u'standard_library', root)
-        return result
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_input.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_input.py
deleted file mode 100755
index 8a43882..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_input.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-Fixer for input.
-
-Does a check for `from builtins import input` before running the lib2to3 fixer.
-The fixer will not run when the input is already present.
-
-
-this:
-    a = input()
-becomes:
-    from builtins import input
-    a = eval(input())
-
-and this:
-    from builtins import input
-    a = input()
-becomes (no change):
-    from builtins import input
-    a = input()
-"""
-
-import lib2to3.fixes.fix_input
-from lib2to3.fixer_util import does_tree_import
-
-
-class FixInput(lib2to3.fixes.fix_input.FixInput):
-    def transform(self, node, results):
-
-        if does_tree_import('builtins', 'input', node):
-            return
-
-        return super(FixInput, self).transform(node, results)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_metaclass.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_metaclass.py
deleted file mode 100755
index 2ac41c9..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_metaclass.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# coding: utf-8
-"""Fixer for __metaclass__ = X -> (future.utils.with_metaclass(X)) methods.
-
-   The various forms of classef (inherits nothing, inherits once, inherints
-   many) don't parse the same in the CST so we look at ALL classes for
-   a __metaclass__ and if we find one normalize the inherits to all be
-   an arglist.
-
-   For one-liner classes ('class X: pass') there is no indent/dedent so
-   we normalize those into having a suite.
-
-   Moving the __metaclass__ into the classdef can also cause the class
-   body to be empty so there is some special casing for that as well.
-
-   This fixer also tries very hard to keep original indenting and spacing
-   in all those corner cases.
-"""
-# This is a derived work of Lib/lib2to3/fixes/fix_metaclass.py under the
-# copyright of the Python Software Foundation, licensed under the Python
-# Software Foundation License 2.
-#
-# Copyright notice:
-#
-#     Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-#     2011, 2012, 2013 Python Software Foundation. All rights reserved.
-#
-# Full license text: http://docs.python.org/3.4/license.html
-
-# Author: Jack Diederich, Daniel Neuhäuser
-
-# Local imports
-from lib2to3 import fixer_base
-from lib2to3.pygram import token
-from lib2to3.fixer_util import Name, syms, Node, Leaf, touch_import, Call, \
-    String, Comma, parenthesize
-
-
-def has_metaclass(parent):
-    """ we have to check the cls_node without changing it.
-        There are two possiblities:
-            1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
-            2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
-    """
-    for node in parent.children:
-        if node.type == syms.suite:
-            return has_metaclass(node)
-        elif node.type == syms.simple_stmt and node.children:
-            expr_node = node.children[0]
-            if expr_node.type == syms.expr_stmt and expr_node.children:
-                left_side = expr_node.children[0]
-                if isinstance(left_side, Leaf) and \
-                   left_side.value == '__metaclass__':
-                    return True
-    return False
-
-
-def fixup_parse_tree(cls_node):
-    """ one-line classes don't get a suite in the parse tree so we add
-        one to normalize the tree
-    """
-    for node in cls_node.children:
-        if node.type == syms.suite:
-            # already in the preferred format, do nothing
-            return
-
-    # !%@#! oneliners have no suite node, we have to fake one up
-    for i, node in enumerate(cls_node.children):
-        if node.type == token.COLON:
-            break
-    else:
-        raise ValueError("No class suite and no ':'!")
-
-    # move everything into a suite node
-    suite = Node(syms.suite, [])
-    while cls_node.children[i+1:]:
-        move_node = cls_node.children[i+1]
-        suite.append_child(move_node.clone())
-        move_node.remove()
-    cls_node.append_child(suite)
-    node = suite
-
-
-def fixup_simple_stmt(parent, i, stmt_node):
-    """ if there is a semi-colon all the parts count as part of the same
-        simple_stmt.  We just want the __metaclass__ part so we move
-        everything after the semi-colon into its own simple_stmt node
-    """
-    for semi_ind, node in enumerate(stmt_node.children):
-        if node.type == token.SEMI: # *sigh*
-            break
-    else:
-        return
-
-    node.remove() # kill the semicolon
-    new_expr = Node(syms.expr_stmt, [])
-    new_stmt = Node(syms.simple_stmt, [new_expr])
-    while stmt_node.children[semi_ind:]:
-        move_node = stmt_node.children[semi_ind]
-        new_expr.append_child(move_node.clone())
-        move_node.remove()
-    parent.insert_child(i, new_stmt)
-    new_leaf1 = new_stmt.children[0].children[0]
-    old_leaf1 = stmt_node.children[0].children[0]
-    new_leaf1.prefix = old_leaf1.prefix
-
-
-def remove_trailing_newline(node):
-    if node.children and node.children[-1].type == token.NEWLINE:
-        node.children[-1].remove()
-
-
-def find_metas(cls_node):
-    # find the suite node (Mmm, sweet nodes)
-    for node in cls_node.children:
-        if node.type == syms.suite:
-            break
-    else:
-        raise ValueError("No class suite!")
-
-    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
-    for i, simple_node in list(enumerate(node.children)):
-        if simple_node.type == syms.simple_stmt and simple_node.children:
-            expr_node = simple_node.children[0]
-            if expr_node.type == syms.expr_stmt and expr_node.children:
-                # Check if the expr_node is a simple assignment.
-                left_node = expr_node.children[0]
-                if isinstance(left_node, Leaf) and \
-                   left_node.value == u'__metaclass__':
-                    # We found a assignment to __metaclass__.
-                    fixup_simple_stmt(node, i, simple_node)
-                    remove_trailing_newline(simple_node)
-                    yield (node, i, simple_node)
-
-
-def fixup_indent(suite):
-    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
-        Otherwise we get in trouble when removing __metaclass__ at suite start
-    """
-    kids = suite.children[::-1]
-    # find the first indent
-    while kids:
-        node = kids.pop()
-        if node.type == token.INDENT:
-            break
-
-    # find the first Leaf
-    while kids:
-        node = kids.pop()
-        if isinstance(node, Leaf) and node.type != token.DEDENT:
-            if node.prefix:
-                node.prefix = u''
-            return
-        else:
-            kids.extend(node.children[::-1])
-
-
-class FixMetaclass(fixer_base.BaseFix):
-    BM_compatible = True
-
-    PATTERN = """
-    classdef
-    """
-
-    def transform(self, node, results):
-        if not has_metaclass(node):
-            return
-
-        fixup_parse_tree(node)
-
-        # find metaclasses, keep the last one
-        last_metaclass = None
-        for suite, i, stmt in find_metas(node):
-            last_metaclass = stmt
-            stmt.remove()
-
-        text_type = node.children[0].type # always Leaf(nnn, 'class')
-
-        # figure out what kind of classdef we have
-        if len(node.children) == 7:
-            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
-            #                 0        1       2    3        4    5    6
-            if node.children[3].type == syms.arglist:
-                arglist = node.children[3]
-            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
-            else:
-                parent = node.children[3].clone()
-                arglist = Node(syms.arglist, [parent])
-                node.set_child(3, arglist)
-        elif len(node.children) == 6:
-            # Node(classdef, ['class', 'name', '(', ')', ':', suite])
-            #                 0        1       2    3    4    5
-            arglist = Node(syms.arglist, [])
-            node.insert_child(3, arglist)
-        elif len(node.children) == 4:
-            # Node(classdef, ['class', 'name', ':', suite])
-            #                 0        1       2    3
-            arglist = Node(syms.arglist, [])
-            node.insert_child(2, Leaf(token.RPAR, u')'))
-            node.insert_child(2, arglist)
-            node.insert_child(2, Leaf(token.LPAR, u'('))
-        else:
-            raise ValueError("Unexpected class definition")
-
-        # now stick the metaclass in the arglist
-        meta_txt = last_metaclass.children[0].children[0]
-        meta_txt.value = 'metaclass'
-        orig_meta_prefix = meta_txt.prefix
-
-        # Was: touch_import(None, u'future.utils', node)
-        touch_import(u'future.utils', u'with_metaclass', node)
-
-        metaclass = last_metaclass.children[0].children[2].clone()
-        metaclass.prefix = u''
-
-        arguments = [metaclass]
-
-        if arglist.children:
-            if len(arglist.children) == 1:
-                base = arglist.children[0].clone()
-                base.prefix = u' '
-            else:
-                # Unfortunately six.with_metaclass() only allows one base
-                # class, so we have to dynamically generate a base class if
-                # there is more than one.
-                bases = parenthesize(arglist.clone())
-                bases.prefix = u' '
-                base = Call(Name('type'), [
-                    String("'NewBase'"),
-                    Comma(),
-                    bases,
-                    Comma(),
-                    Node(
-                        syms.atom,
-                        [Leaf(token.LBRACE, u'{'), Leaf(token.RBRACE, u'}')],
-                        prefix=u' '
-                    )
-                ], prefix=u' ')
-            arguments.extend([Comma(), base])
-
-        arglist.replace(Call(
-            Name(u'with_metaclass', prefix=arglist.prefix),
-            arguments
-        ))
-
-        fixup_indent(suite)
-
-        # check for empty suite
-        if not suite.children:
-            # one-liner that was just __metaclass_
-            suite.remove()
-            pass_leaf = Leaf(text_type, u'pass')
-            pass_leaf.prefix = orig_meta_prefix
-            node.append_child(pass_leaf)
-            node.append_child(Leaf(token.NEWLINE, u'\n'))
-
-        elif len(suite.children) > 1 and \
-             (suite.children[-2].type == token.INDENT and
-              suite.children[-1].type == token.DEDENT):
-            # there was only one line in the class body and it was __metaclass__
-            pass_leaf = Leaf(text_type, u'pass')
-            suite.insert_child(-1, pass_leaf)
-            suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_next_call.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_next_call.py
deleted file mode 100755
index 282f185..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_next_call.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""
-Based on fix_next.py by Collin Winter.
-
-Replaces it.next() -> next(it), per PEP 3114.
-
-Unlike fix_next.py, this fixer doesn't replace the name of a next method with __next__,
-which would break Python 2 compatibility without further help from fixers in
-stage 2.
-"""
-
-# Local imports
-from lib2to3.pgen2 import token
-from lib2to3.pygram import python_symbols as syms
-from lib2to3 import fixer_base
-from lib2to3.fixer_util import Name, Call, find_binding
-
-bind_warning = "Calls to builtin next() possibly shadowed by global binding"
-
-
-class FixNextCall(fixer_base.BaseFix):
-    BM_compatible = True
-    PATTERN = """
-    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
-    |
-    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
-    |
-    global=global_stmt< 'global' any* 'next' any* >
-    """
-
-    order = "pre" # Pre-order tree traversal
-
-    def start_tree(self, tree, filename):
-        super(FixNextCall, self).start_tree(tree, filename)
-
-        n = find_binding('next', tree)
-        if n:
-            self.warning(n, bind_warning)
-            self.shadowed_next = True
-        else:
-            self.shadowed_next = False
-
-    def transform(self, node, results):
-        assert results
-
-        base = results.get("base")
-        attr = results.get("attr")
-        name = results.get("name")
-
-        if base:
-            if self.shadowed_next:
-                # Omit this:
-                # attr.replace(Name("__next__", prefix=attr.prefix))
-                pass
-            else:
-                base = [n.clone() for n in base]
-                base[0].prefix = ""
-                node.replace(Call(Name("next", prefix=node.prefix), base))
-        elif name:
-            # Omit this:
-            # n = Name("__next__", prefix=name.prefix)
-            # name.replace(n)
-            pass
-        elif attr:
-            # We don't do this transformation if we're assigning to "x.next".
-            # Unfortunately, it doesn't seem possible to do this in PATTERN,
-            # so it's being done here.
-            if is_assign_target(node):
-                head = results["head"]
-                if "".join([str(n) for n in head]).strip() == '__builtin__':
-                    self.warning(node, bind_warning)
-                return
-            # Omit this:
-            # attr.replace(Name("__next__"))
-        elif "global" in results:
-            self.warning(node, bind_warning)
-            self.shadowed_next = True
-
-
-### The following functions help test if node is part of an assignment
-### target.
-
-def is_assign_target(node):
-    assign = find_assign(node)
-    if assign is None:
-        return False
-
-    for child in assign.children:
-        if child.type == token.EQUAL:
-            return False
-        elif is_subtree(child, node):
-            return True
-    return False
-
-def find_assign(node):
-    if node.type == syms.expr_stmt:
-        return node
-    if node.type == syms.simple_stmt or node.parent is None:
-        return None
-    return find_assign(node.parent)
-
-def is_subtree(root, node):
-    if root == node:
-        return True
-    return any(is_subtree(c, node) for c in root.children)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_object.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_object.py
deleted file mode 100755
index accf2c5..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_object.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Fixer that adds ``from builtins import object`` if there is a line
-like this:
-    class Foo(object):
-"""
-
-from lib2to3 import fixer_base
-
-from libfuturize.fixer_util import touch_import_top
-
-
-class FixObject(fixer_base.BaseFix):
-
-    PATTERN = u"classdef< 'class' NAME '(' name='object' ')' colon=':' any >"
-
-    def transform(self, node, results):
-        touch_import_top(u'builtins', 'object', node)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_oldstr_wrap.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_oldstr_wrap.py
deleted file mode 100755
index ad58771..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_oldstr_wrap.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-For the ``future`` package.
-
-Adds this import line:
-
-    from past.builtins import str as oldstr
-
-at the top and wraps any unadorned string literals 'abc' or explicit byte-string
-literals b'abc' in oldstr() calls so the code has the same behaviour on Py3 as
-on Py2.6/2.7.
-"""
-
-from __future__ import unicode_literals
-import re
-from lib2to3 import fixer_base
-from lib2to3.pgen2 import token
-from lib2to3.fixer_util import syms
-from libfuturize.fixer_util import (future_import, touch_import_top,
-                                    wrap_in_fn_call)
-
-
-_literal_re = re.compile(r"[^uUrR]?[\'\"]")
-
-
-class FixOldstrWrap(fixer_base.BaseFix):
-    BM_compatible = True
-    PATTERN = "STRING"
-
-    def transform(self, node, results):
-        if node.type == token.STRING:
-            touch_import_top(u'past.types', u'oldstr', node)
-            if _literal_re.match(node.value):
-                new = node.clone()
-                # Strip any leading space or comments:
-                # TODO: check: do we really want to do this?
-                new.prefix = u''
-                new.value = u'b' + new.value
-                wrapped = wrap_in_fn_call("oldstr", [new], prefix=node.prefix)
-                return wrapped
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_order___future__imports.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_order___future__imports.py
deleted file mode 100755
index 00d7ef6..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_order___future__imports.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-UNFINISHED
-
-Fixer for turning multiple lines like these:
-
-    from __future__ import division
-    from __future__ import absolute_import
-    from __future__ import print_function
-
-into a single line like this:
-
-    from __future__ import (absolute_import, division, print_function)
-
-This helps with testing of ``futurize``.
-"""
-
-from lib2to3 import fixer_base
-from libfuturize.fixer_util import future_import
-
-class FixOrderFutureImports(fixer_base.BaseFix):
-    BM_compatible = True
-    PATTERN = "file_input"
-
-    run_order = 10
-
-    # def match(self, node):
-    #     """
-    #     Match only once per file
-    #     """
-    #     if hasattr(node, 'type') and node.type == syms.file_input:
-    #         return True
-    #     return False
-
-    def transform(self, node, results):
-        # TODO # write me
-        pass
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print.py
deleted file mode 100755
index 247b91b..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2006 Google, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Fixer for print.
-
-Change:
-    "print"          into "print()"
-    "print ..."      into "print(...)"
-    "print(...)"     not changed
-    "print ... ,"    into "print(..., end=' ')"
-    "print >>x, ..." into "print(..., file=x)"
-
-No changes are applied if print_function is imported from __future__
-
-"""
-
-# Local imports
-from lib2to3 import patcomp, pytree, fixer_base
-from lib2to3.pgen2 import token
-from lib2to3.fixer_util import Name, Call, Comma, String
-# from libmodernize import add_future
-
-parend_expr = patcomp.compile_pattern(
-              """atom< '(' [arith_expr|atom|power|term|STRING|NAME] ')' >"""
-              )
-
-
-class FixPrint(fixer_base.BaseFix):
-
-    BM_compatible = True
-
-    PATTERN = """
-              simple_stmt< any* bare='print' any* > | print_stmt
-              """
-
-    def transform(self, node, results):
-        assert results
-
-        bare_print = results.get("bare")
-
-        if bare_print:
-            # Special-case print all by itself.
-            bare_print.replace(Call(Name(u"print"), [],
-                               prefix=bare_print.prefix))
-            # The "from __future__ import print_function"" declaration is added
-            # by the fix_print_with_import fixer, so we skip it here.
-            # add_future(node, u'print_function')
-            return
-        assert node.children[0] == Name(u"print")
-        args = node.children[1:]
-        if len(args) == 1 and parend_expr.match(args[0]):
-            # We don't want to keep sticking parens around an
-            # already-parenthesised expression.
-            return
-
-        sep = end = file = None
-        if args and args[-1] == Comma():
-            args = args[:-1]
-            end = " "
-        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
-            assert len(args) >= 2
-            file = args[1].clone()
-            args = args[3:] # Strip a possible comma after the file expression
-        # Now synthesize a print(args, sep=..., end=..., file=...) node.
-        l_args = [arg.clone() for arg in args]
-        if l_args:
-            l_args[0].prefix = u""
-        if sep is not None or end is not None or file is not None:
-            if sep is not None:
-                self.add_kwarg(l_args, u"sep", String(repr(sep)))
-            if end is not None:
-                self.add_kwarg(l_args, u"end", String(repr(end)))
-            if file is not None:
-                self.add_kwarg(l_args, u"file", file)
-        n_stmt = Call(Name(u"print"), l_args)
-        n_stmt.prefix = node.prefix
-
-        # Note that there are corner cases where adding this future-import is
-        # incorrect, for example when the file also has a 'print ()' statement
-        # that was intended to print "()".
-        # add_future(node, u'print_function')
-        return n_stmt
-
-    def add_kwarg(self, l_nodes, s_kwd, n_expr):
-        # XXX All this prefix-setting may lose comments (though rarely)
-        n_expr.prefix = u""
-        n_argument = pytree.Node(self.syms.argument,
-                                 (Name(s_kwd),
-                                  pytree.Leaf(token.EQUAL, u"="),
-                                  n_expr))
-        if l_nodes:
-            l_nodes.append(Comma())
-            n_argument.prefix = u" "
-        l_nodes.append(n_argument)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print_with_import.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print_with_import.py
deleted file mode 100755
index 3449046..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_print_with_import.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-For the ``future`` package.
-
-Turns any print statements into functions and adds this import line:
-
-    from __future__ import print_function
-
-at the top to retain compatibility with Python 2.6+.
-"""
-
-from libfuturize.fixes.fix_print import FixPrint
-from libfuturize.fixer_util import future_import
-
-class FixPrintWithImport(FixPrint):
-    run_order = 7
-    def transform(self, node, results):
-        # Add the __future__ import first. (Otherwise any shebang or encoding
-        # comment line attached as a prefix to the print statement will be
-        # copied twice and appear twice.)
-        future_import(u'print_function', node)
-        n_stmt = super(FixPrintWithImport, self).transform(node, results)
-        return n_stmt
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_raise.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_raise.py
deleted file mode 100755
index f751841..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_raise.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""Fixer for 'raise E, V'
-
-From Armin Ronacher's ``python-modernize``.
-
-raise            -> raise
-raise E          -> raise E
-raise E, 5       -> raise E(5)
-raise E, 5, T    -> raise E(5).with_traceback(T)
-raise E, None, T -> raise E.with_traceback(T)
-
-raise (((E, E'), E''), E'''), 5 -> raise E(5)
-raise "foo", V, T               -> warns about string exceptions
-
-raise E, (V1, V2)    -> raise E(V1, V2)
-raise E, (V1, V2), T -> raise E(V1, V2).with_traceback(T)
-
-
-CAVEATS:
-1) "raise E, V, T" cannot be translated safely in general. If V
-   is not a tuple or a (number, string, None) literal, then:
-
-   raise E, V, T -> from future.utils import raise_
-                    raise_(E, V, T)
-"""
-# Author: Collin Winter, Armin Ronacher, Mark Huang
-
-# Local imports
-from lib2to3 import pytree, fixer_base
-from lib2to3.pgen2 import token
-from lib2to3.fixer_util import Name, Call, is_tuple, Comma, Attr, ArgList
-
-from libfuturize.fixer_util import touch_import_top
-
-
-class FixRaise(fixer_base.BaseFix):
-
-    BM_compatible = True
-    PATTERN = """
-    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
-    """
-
-    def transform(self, node, results):
-        syms = self.syms
-
-        exc = results["exc"].clone()
-        if exc.type == token.STRING:
-            msg = "Python 3 does not support string exceptions"
-            self.cannot_convert(node, msg)
-            return
-
-        # Python 2 supports
-        #  raise ((((E1, E2), E3), E4), E5), V
-        # as a synonym for
-        #  raise E1, V
-        # Since Python 3 will not support this, we recurse down any tuple
-        # literals, always taking the first element.
-        if is_tuple(exc):
-            while is_tuple(exc):
-                # exc.children[1:-1] is the unparenthesized tuple
-                # exc.children[1].children[0] is the first element of the tuple
-                exc = exc.children[1].children[0].clone()
-            exc.prefix = u" "
-
-        if "tb" in results:
-            tb = results["tb"].clone()
-        else:
-            tb = None
-
-        if "val" in results:
-            val = results["val"].clone()
-            if is_tuple(val):
-                # Assume that exc is a subclass of Exception and call exc(*val).
-                args = [c.clone() for c in val.children[1:-1]]
-                exc = Call(exc, args)
-            elif val.type in (token.NUMBER, token.STRING):
-                # Handle numeric and string literals specially, e.g.
-                # "raise Exception, 5" -> "raise Exception(5)".
-                val.prefix = u""
-                exc = Call(exc, [val])
-            elif val.type == token.NAME and val.value == u"None":
-                # Handle None specially, e.g.
-                # "raise Exception, None" -> "raise Exception".
-                pass
-            else:
-                # val is some other expression. If val evaluates to an instance
-                # of exc, it should just be raised. If val evaluates to None,
-                # a default instance of exc should be raised (as above). If val
-                # evaluates to a tuple, exc(*val) should be called (as
-                # above). Otherwise, exc(val) should be called. We can only
-                # tell what to do at runtime, so defer to future.utils.raise_(),
-                # which handles all of these cases.
-                touch_import_top(u"future.utils", u"raise_", node)
-                exc.prefix = u""
-                args = [exc, Comma(), val]
-                if tb is not None:
-                    args += [Comma(), tb]
-                return Call(Name(u"raise_"), args)
-
-        if tb is not None:
-            tb.prefix = ""
-            exc_list = Attr(exc, Name('with_traceback')) + [ArgList([tb])]
-        else:
-            exc_list = [exc]
-
-        return pytree.Node(syms.raise_stmt,
-                           [Name(u"raise")] + exc_list,
-                           prefix=node.prefix)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_remove_old__future__imports.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_remove_old__future__imports.py
deleted file mode 100755
index 9336f75..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_remove_old__future__imports.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-Fixer for removing any of these lines:
-
-    from __future__ import with_statement
-    from __future__ import nested_scopes
-    from __future__ import generators
-
-The reason is that __future__ imports like these are required to be the first
-line of code (after docstrings) on Python 2.6+, which can get in the way.
-
-These imports are always enabled in Python 2.6+, which is the minimum sane
-version to target for Py2/3 compatibility.
-"""
-
-from lib2to3 import fixer_base
-from libfuturize.fixer_util import remove_future_import
-
-class FixRemoveOldFutureImports(fixer_base.BaseFix):
-    BM_compatible = True
-    PATTERN = "file_input"
-    run_order = 1
-
-    def transform(self, node, results):
-        remove_future_import(u"with_statement", node)
-        remove_future_import(u"nested_scopes", node)
-        remove_future_import(u"generators", node)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_keep_u.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_keep_u.py
deleted file mode 100755
index 2e9a4e4..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_keep_u.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""Fixer that changes unicode to str and unichr to chr, but -- unlike the
-lib2to3 fix_unicode.py fixer, does not change u"..." into "...".
-
-The reason is that Py3.3+ supports the u"..." string prefix, and, if
-present, the prefix may provide useful information for disambiguating
-between byte strings and unicode strings, which is often the hardest part
-of the porting task.
-
-"""
-
-from lib2to3.pgen2 import token
-from lib2to3 import fixer_base
-
-_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
-
-class FixUnicodeKeepU(fixer_base.BaseFix):
-    BM_compatible = True
-    PATTERN = "'unicode' | 'unichr'"
-
-    def transform(self, node, results):
-        if node.type == token.NAME:
-            new = node.clone()
-            new.value = _mapping[node.value]
-            return new
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_literals_import.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_literals_import.py
deleted file mode 100755
index 51c5062..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_unicode_literals_import.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""
-Adds this import:
-
-    from __future__ import unicode_literals
-
-"""
-
-from lib2to3 import fixer_base
-from libfuturize.fixer_util import future_import
-
-class FixUnicodeLiteralsImport(fixer_base.BaseFix):
-    BM_compatible = True
-    PATTERN = "file_input"
-
-    run_order = 9
-
-    def transform(self, node, results):
-        future_import(u"unicode_literals", node)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_xrange_with_import.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_xrange_with_import.py
deleted file mode 100755
index c910f81..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/fixes/fix_xrange_with_import.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-For the ``future`` package.
-
-Turns any xrange calls into range calls and adds this import line:
-
-    from builtins import range
-
-at the top.
-"""
-
-from lib2to3.fixes.fix_xrange import FixXrange
-
-from libfuturize.fixer_util import touch_import_top
-
-
-class FixXrangeWithImport(FixXrange):
-    def transform(self, node, results):
-        result = super(FixXrangeWithImport, self).transform(node, results)
-        touch_import_top('builtins', 'range', node)
-        return result
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/main.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/main.py
deleted file mode 100755
index 634c2f2..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libfuturize/main.py
+++ /dev/null
@@ -1,322 +0,0 @@
-"""
-futurize: automatic conversion to clean 2/3 code using ``python-future``
-======================================================================
-
-Like Armin Ronacher's modernize.py, ``futurize`` attempts to produce clean
-standard Python 3 code that runs on both Py2 and Py3.
-
-One pass
---------
-
-Use it like this on Python 2 code:
-
-  $ futurize --verbose mypython2script.py
-
-This will attempt to port the code to standard Py3 code that also
-provides Py2 compatibility with the help of the right imports from
-``future``.
-
-To write changes to the files, use the -w flag.
-
-Two stages
-----------
-
-The ``futurize`` script can also be called in two separate stages. First:
-
-  $ futurize --stage1 mypython2script.py
-
-This produces more modern Python 2 code that is not yet compatible with Python
-3. The tests should still run and the diff should be uncontroversial to apply to
-most Python projects that are willing to drop support for Python 2.5 and lower.
- -After this, the recommended approach is to explicitly mark all strings that must -be byte-strings with a b'' prefix and all text (unicode) strings with a u'' -prefix, and then invoke the second stage of Python 2 to 2/3 conversion with:: - - $ futurize --stage2 mypython2script.py - -Stage 2 adds a dependency on ``future``. It converts most remaining Python -2-specific code to Python 3 code and adds appropriate imports from ``future`` -to restore Py2 support. - -The command above leaves all unadorned string literals as native strings -(byte-strings on Py2, unicode strings on Py3). If instead you would like all -unadorned string literals to be promoted to unicode, you can also pass this -flag: - - $ futurize --stage2 --unicode-literals mypython2script.py - -This adds the declaration ``from __future__ import unicode_literals`` to the -top of each file, which implicitly declares all unadorned string literals to be -unicode strings (``unicode`` on Py2). - -All imports ------------ - -The --all-imports option forces adding all ``__future__`` imports, -``builtins`` imports, and standard library aliases, even if they don't -seem necessary for the current state of each module. (This can simplify -testing, and can reduce the need to think about Py2 compatibility when editing -the code further.) - -""" - -from __future__ import (absolute_import, print_function, unicode_literals) -import future.utils -from future import __version__ - -import sys -import logging -import optparse -import os - -from lib2to3.main import warn, StdoutRefactoringTool -from lib2to3 import refactor - -from libfuturize.fixes import (lib2to3_fix_names_stage1, - lib2to3_fix_names_stage2, - libfuturize_fix_names_stage1, - libfuturize_fix_names_stage2) - -fixer_pkg = 'libfuturize.fixes' - - -def main(args=None): - """Main program. - - Args: - fixer_pkg: the name of a package where the fixers are located. - args: optional; a list of command line arguments. If omitted, - sys.argv[1:] is used. - - Returns a suggested exit status (0, 1, 2). - """ - - # Set up option parser - parser = optparse.OptionParser(usage="futurize [options] file|dir ...") - parser.add_option("-V", "--version", action="store_true", - help="Report the version number of futurize") - parser.add_option("-a", "--all-imports", action="store_true", - help="Add all __future__ and future imports to each module") - parser.add_option("-1", "--stage1", action="store_true", - help="Modernize Python 2 code only; no compatibility with Python 3 (or dependency on ``future``)") - parser.add_option("-2", "--stage2", action="store_true", - help="Take modernized (stage1) code and add a dependency on ``future`` to provide Py3 compatibility.") - parser.add_option("-0", "--both-stages", action="store_true", - help="Apply both stages 1 and 2") - parser.add_option("-u", "--unicode-literals", action="store_true", - help="Add ``from __future__ import unicode_literals`` to implicitly convert all unadorned string literals '' into unicode strings") - parser.add_option("-f", "--fix", action="append", default=[], - help="Each FIX specifies a transformation; default: all.\nEither use '-f division -f metaclass' etc. 
or use the fully-qualified module name: '-f lib2to3.fixes.fix_types -f libfuturize.fixes.fix_unicode_keep_u'") - parser.add_option("-j", "--processes", action="store", default=1, - type="int", help="Run 2to3 concurrently") - parser.add_option("-x", "--nofix", action="append", default=[], - help="Prevent a fixer from being run.") - parser.add_option("-l", "--list-fixes", action="store_true", - help="List available transformations") - parser.add_option("-p", "--print-function", action="store_true", - help="Modify the grammar so that print() is a function") - parser.add_option("-v", "--verbose", action="store_true", - help="More verbose logging") - parser.add_option("--no-diffs", action="store_true", - help="Don't show diffs of the refactoring") - parser.add_option("-w", "--write", action="store_true", - help="Write back modified files") - parser.add_option("-n", "--nobackups", action="store_true", default=False, - help="Don't write backups for modified files.") - parser.add_option("-o", "--output-dir", action="store", type="str", - default="", help="Put output files in this directory " - "instead of overwriting the input files. Requires -n. " - "For Python >= 2.7 only.") - parser.add_option("-W", "--write-unchanged-files", action="store_true", - help="Also write files even if no changes were required" - " (useful with --output-dir); implies -w.") - parser.add_option("--add-suffix", action="store", type="str", default="", - help="Append this string to all output filenames." - " Requires -n if non-empty. For Python >= 2.7 only." - "ex: --add-suffix='3' will generate .py3 files.") - - # Parse command line arguments - flags = {} - refactor_stdin = False - options, args = parser.parse_args(args) - - if options.write_unchanged_files: - flags["write_unchanged_files"] = True - if not options.write: - warn("--write-unchanged-files/-W implies -w.") - options.write = True - # If we allowed these, the original files would be renamed to backup names - # but not replaced. - if options.output_dir and not options.nobackups: - parser.error("Can't use --output-dir/-o without -n.") - if options.add_suffix and not options.nobackups: - parser.error("Can't use --add-suffix without -n.") - - if not options.write and options.no_diffs: - warn("not writing files and not printing diffs; that's not very useful") - if not options.write and options.nobackups: - parser.error("Can't use -n without -w") - if "-" in args: - refactor_stdin = True - if options.write: - print("Can't write to stdin.", file=sys.stderr) - return 2 - # Is this ever necessary? 
- if options.print_function: - flags["print_function"] = True - - # Set up logging handler - level = logging.DEBUG if options.verbose else logging.INFO - logging.basicConfig(format='%(name)s: %(message)s', level=level) - logger = logging.getLogger('libfuturize.main') - - if options.stage1 or options.stage2: - assert options.both_stages is None - options.both_stages = False - else: - options.both_stages = True - - avail_fixes = set() - - if options.stage1 or options.both_stages: - avail_fixes.update(lib2to3_fix_names_stage1) - avail_fixes.update(libfuturize_fix_names_stage1) - if options.stage2 or options.both_stages: - avail_fixes.update(lib2to3_fix_names_stage2) - avail_fixes.update(libfuturize_fix_names_stage2) - - if options.unicode_literals: - avail_fixes.add('libfuturize.fixes.fix_unicode_literals_import') - - if options.version: - print(__version__) - return 0 - if options.list_fixes: - print("Available transformations for the -f/--fix option:") - # for fixname in sorted(refactor.get_all_fix_names(fixer_pkg)): - for fixname in sorted(avail_fixes): - print(fixname) - if not args: - return 0 - if not args: - print("At least one file or directory argument required.", - file=sys.stderr) - print("Use --help to show usage.", file=sys.stderr) - return 2 - - unwanted_fixes = set() - for fix in options.nofix: - if ".fix_" in fix: - unwanted_fixes.add(fix) - else: - # Infer the full module name for the fixer. - # First ensure that no names clash (e.g. - # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah): - found = [f for f in avail_fixes - if f.endswith('fix_{0}'.format(fix))] - if len(found) > 1: - print("Ambiguous fixer name. Choose a fully qualified " - "module name instead from these:\n" + - "\n".join(" " + myf for myf in found), - file=sys.stderr) - return 2 - elif len(found) == 0: - print("Unknown fixer. Use --list-fixes or -l for a list.", - file=sys.stderr) - return 2 - unwanted_fixes.add(found[0]) - - extra_fixes = set() - if options.all_imports: - if options.stage1: - prefix = 'libfuturize.fixes.' - extra_fixes.add(prefix + - 'fix_add__future__imports_except_unicode_literals') - else: - # In case the user hasn't run stage1 for some reason: - prefix = 'libpasteurize.fixes.' - extra_fixes.add(prefix + 'fix_add_all__future__imports') - extra_fixes.add(prefix + 'fix_add_future_standard_library_import') - extra_fixes.add(prefix + 'fix_add_all_future_builtins') - explicit = set() - if options.fix: - all_present = False - for fix in options.fix: - if fix == 'all': - all_present = True - else: - if ".fix_" in fix: - explicit.add(fix) - else: - # Infer the full module name for the fixer. - # First ensure that no names clash (e.g. - # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah): - found = [f for f in avail_fixes - if f.endswith('fix_{0}'.format(fix))] - if len(found) > 1: - print("Ambiguous fixer name. Choose a fully qualified " - "module name instead from these:\n" + - "\n".join(" " + myf for myf in found), - file=sys.stderr) - return 2 - elif len(found) == 0: - print("Unknown fixer. 
Use --list-fixes or -l for a list.", - file=sys.stderr) - return 2 - explicit.add(found[0]) - if len(explicit & unwanted_fixes) > 0: - print("Conflicting usage: the following fixers have been " - "simultaneously requested and disallowed:\n" + - "\n".join(" " + myf for myf in (explicit & unwanted_fixes)), - file=sys.stderr) - return 2 - requested = avail_fixes.union(explicit) if all_present else explicit - else: - requested = avail_fixes.union(explicit) - fixer_names = (requested | extra_fixes) - unwanted_fixes - - input_base_dir = os.path.commonprefix(args) - if (input_base_dir and not input_base_dir.endswith(os.sep) - and not os.path.isdir(input_base_dir)): - # One or more similar names were passed, their directory is the base. - # os.path.commonprefix() is ignorant of path elements, this corrects - # for that weird API. - input_base_dir = os.path.dirname(input_base_dir) - if options.output_dir: - input_base_dir = input_base_dir.rstrip(os.sep) - logger.info('Output in %r will mirror the input directory %r layout.', - options.output_dir, input_base_dir) - - # Initialize the refactoring tool - if future.utils.PY26: - extra_kwargs = {} - else: - extra_kwargs = { - 'append_suffix': options.add_suffix, - 'output_dir': options.output_dir, - 'input_base_dir': input_base_dir, - } - - rt = StdoutRefactoringTool( - sorted(fixer_names), flags, sorted(explicit), - options.nobackups, not options.no_diffs, - **extra_kwargs) - - # Refactor all files and directories passed as arguments - if not rt.errors: - if refactor_stdin: - rt.refactor_stdin() - else: - try: - rt.refactor(args, options.write, None, - options.processes) - except refactor.MultiprocessingUnsupported: - assert options.processes > 1 - print("Sorry, -j isn't " \ - "supported on this platform.", file=sys.stderr) - return 1 - rt.summarize() - - # Return error status (0 if rt.errors is zero) - return int(bool(rt.errors)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/__init__.py deleted file mode 100755 index 4cb1cbc..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty to make this a package diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/__init__.py deleted file mode 100755 index 905aec4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -import sys -from lib2to3 import refactor - -# The original set of these fixes comes from lib3to2 (https://bitbucket.org/amentajo/lib3to2): -fix_names = set([ - 'libpasteurize.fixes.fix_add_all__future__imports', # from __future__ import absolute_import etc. 
on separate lines - 'libpasteurize.fixes.fix_add_future_standard_library_import', # we force adding this import for now, even if it doesn't seem necessary to the fix_future_standard_library fixer, for ease of testing - # 'libfuturize.fixes.fix_order___future__imports', # consolidates to a single line to simplify testing -- UNFINISHED - 'libpasteurize.fixes.fix_future_builtins', # adds "from future.builtins import *" - 'libfuturize.fixes.fix_future_standard_library', # adds "from future import standard_library" - - 'libpasteurize.fixes.fix_annotations', - # 'libpasteurize.fixes.fix_bitlength', # ints have this in Py2.7 - # 'libpasteurize.fixes.fix_bool', # need a decorator or Mixin - # 'libpasteurize.fixes.fix_bytes', # leave bytes as bytes - # 'libpasteurize.fixes.fix_classdecorator', # available in - # Py2.6+ - # 'libpasteurize.fixes.fix_collections', hmmm ... - # 'libpasteurize.fixes.fix_dctsetcomp', # avail in Py27 - 'libpasteurize.fixes.fix_division', # yes - # 'libpasteurize.fixes.fix_except', # avail in Py2.6+ - # 'libpasteurize.fixes.fix_features', # ? - 'libpasteurize.fixes.fix_fullargspec', - # 'libpasteurize.fixes.fix_funcattrs', - 'libpasteurize.fixes.fix_getcwd', - 'libpasteurize.fixes.fix_imports', # adds "from future import standard_library" - 'libpasteurize.fixes.fix_imports2', - # 'libpasteurize.fixes.fix_input', - # 'libpasteurize.fixes.fix_int', - # 'libpasteurize.fixes.fix_intern', - # 'libpasteurize.fixes.fix_itertools', - 'libpasteurize.fixes.fix_kwargs', # yes, we want this - # 'libpasteurize.fixes.fix_memoryview', - # 'libpasteurize.fixes.fix_metaclass', # write a custom handler for - # this - # 'libpasteurize.fixes.fix_methodattrs', # __func__ and __self__ seem to be defined on Py2.7 already - 'libpasteurize.fixes.fix_newstyle', # yes, we want this: explicit inheritance from object. Without new-style classes in Py2, super() will break etc. - # 'libpasteurize.fixes.fix_next', # use a decorator for this - # 'libpasteurize.fixes.fix_numliterals', # prob not - # 'libpasteurize.fixes.fix_open', # huh? - # 'libpasteurize.fixes.fix_print', # no way - 'libpasteurize.fixes.fix_printfunction', # adds __future__ import print_function - # 'libpasteurize.fixes.fix_raise_', # TODO: get this working! - - # 'libpasteurize.fixes.fix_range', # nope - # 'libpasteurize.fixes.fix_reduce', - # 'libpasteurize.fixes.fix_setliteral', - # 'libpasteurize.fixes.fix_str', - # 'libpasteurize.fixes.fix_super', # maybe, if our magic super() isn't robust enough - 'libpasteurize.fixes.fix_throw', # yes, if Py3 supports it - # 'libpasteurize.fixes.fix_unittest', - 'libpasteurize.fixes.fix_unpacking', # yes, this is useful - # 'libpasteurize.fixes.fix_with' # way out of date - ]) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/feature_base.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/feature_base.py deleted file mode 100755 index c36d9a9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/feature_base.py +++ /dev/null @@ -1,57 +0,0 @@ -u""" -Base classes for features that are backwards-incompatible. 
- -Usage: -features = Features() -features.add(Feature("py3k_feature", "power< 'py3k' any* >", "2.7")) -PATTERN = features.PATTERN -""" - -pattern_unformatted = u"%s=%s" # name=pattern, for dict lookups -message_unformatted = u""" -%s is only supported in Python %s and above.""" - -class Feature(object): - u""" - A feature has a name, a pattern, and a minimum version of Python 2.x - required to use the feature (or 3.x if there is no backwards-compatible - version of 2.x) - """ - def __init__(self, name, PATTERN, version): - self.name = name - self._pattern = PATTERN - self.version = version - - def message_text(self): - u""" - Format the above text with the name and minimum version required. - """ - return message_unformatted % (self.name, self.version) - -class Features(set): - u""" - A set of features that generates a pattern for the features it contains. - This set will act like a mapping in that we map names to patterns. - """ - mapping = {} - - def update_mapping(self): - u""" - Called every time we care about the mapping of names to features. - """ - self.mapping = dict([(f.name, f) for f in iter(self)]) - - @property - def PATTERN(self): - u""" - Uses the mapping of names to features to return a PATTERN suitable - for using the lib2to3 patcomp. - """ - self.update_mapping() - return u" |\n".join([pattern_unformatted % (f.name, f._pattern) for f in iter(self)]) - - def __getitem__(self, key): - u""" - Implement a simple mapping to get patterns from names. - """ - return self.mapping[key] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py deleted file mode 100755 index a151f9f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all__future__imports.py +++ /dev/null @@ -1,24 +0,0 @@ -""" -Fixer for adding: - - from __future__ import absolute_import - from __future__ import division - from __future__ import print_function - from __future__ import unicode_literals - -This is done when converting from Py3 to both Py3/Py2. -""" - -from lib2to3 import fixer_base -from libfuturize.fixer_util import future_import - -class FixAddAllFutureImports(fixer_base.BaseFix): - BM_compatible = True - PATTERN = "file_input" - run_order = 1 - - def transform(self, node, results): - future_import(u"absolute_import", node) - future_import(u"division", node) - future_import(u"print_function", node) - future_import(u"unicode_literals", node) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all_future_builtins.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all_future_builtins.py deleted file mode 100755 index 22911ba..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_all_future_builtins.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -For the ``future`` package. - -Adds this import line:: - - from builtins import (ascii, bytes, chr, dict, filter, hex, input, - int, list, map, next, object, oct, open, pow, - range, round, str, super, zip) - -to a module, irrespective of whether each definition is used. - -Adds these imports after any other imports (in an initial block of them). 
-""" - -from __future__ import unicode_literals - -from lib2to3 import fixer_base - -from libfuturize.fixer_util import touch_import_top - - -class FixAddAllFutureBuiltins(fixer_base.BaseFix): - BM_compatible = True - PATTERN = "file_input" - run_order = 1 - - def transform(self, node, results): - # import_str = """(ascii, bytes, chr, dict, filter, hex, input, - # int, list, map, next, object, oct, open, pow, - # range, round, str, super, zip)""" - touch_import_top(u'builtins', '*', node) - - # builtins = """ascii bytes chr dict filter hex input - # int list map next object oct open pow - # range round str super zip""" - # for builtin in sorted(builtins.split(), reverse=True): - # touch_import_top(u'builtins', builtin, node) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_future_standard_library_import.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_future_standard_library_import.py deleted file mode 100755 index 0778406..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_add_future_standard_library_import.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -For the ``future`` package. - -Adds this import line: - - from future import standard_library - -after any __future__ imports but before any other imports. Doesn't actually -change the imports to Py3 style. -""" - -from lib2to3 import fixer_base -from libfuturize.fixer_util import touch_import_top - -class FixAddFutureStandardLibraryImport(fixer_base.BaseFix): - BM_compatible = True - PATTERN = "file_input" - run_order = 8 - - def transform(self, node, results): - # TODO: add a blank line between any __future__ imports and this? - touch_import_top(u'future', u'standard_library', node) - # TODO: also add standard_library.install_hooks() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_annotations.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_annotations.py deleted file mode 100755 index 884b674..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_annotations.py +++ /dev/null @@ -1,48 +0,0 @@ -u""" -Fixer to remove function annotations -""" - -from lib2to3 import fixer_base -from lib2to3.pgen2 import token -from lib2to3.fixer_util import syms - -warning_text = u"Removing function annotations completely." - -def param_without_annotations(node): - return node.children[0] - -class FixAnnotations(fixer_base.BaseFix): - - warned = False - - def warn_once(self, node, reason): - if not self.warned: - self.warned = True - self.warning(node, reason=reason) - - PATTERN = u""" - funcdef< 'def' any parameters< '(' [params=any] ')' > ['->' ret=any] ':' any* > - """ - - def transform(self, node, results): - u""" - This just strips annotations from the funcdef completely. 
- """ - params = results.get(u"params") - ret = results.get(u"ret") - if ret is not None: - assert ret.prev_sibling.type == token.RARROW, u"Invalid return annotation" - self.warn_once(node, reason=warning_text) - ret.prev_sibling.remove() - ret.remove() - if params is None: return - if params.type == syms.typedargslist: - # more than one param in a typedargslist - for param in params.children: - if param.type == syms.tname: - self.warn_once(node, reason=warning_text) - param.replace(param_without_annotations(param)) - elif params.type == syms.tname: - # one param - self.warn_once(node, reason=warning_text) - params.replace(param_without_annotations(params)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_division.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_division.py deleted file mode 100755 index 6a04871..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_division.py +++ /dev/null @@ -1,28 +0,0 @@ -u""" -Fixer for division: from __future__ import division if needed -""" - -from lib2to3 import fixer_base -from libfuturize.fixer_util import token, future_import - -def match_division(node): - u""" - __future__.division redefines the meaning of a single slash for division, - so we match that and only that. - """ - slash = token.SLASH - return node.type == slash and not node.next_sibling.type == slash and \ - not node.prev_sibling.type == slash - -class FixDivision(fixer_base.BaseFix): - run_order = 4 # this seems to be ignored? - - def match(self, node): - u""" - Since the tree needs to be fixed once and only once if and only if it - matches, then we can start discarding matches after we make the first. - """ - return match_division(node) - - def transform(self, node, results): - future_import(u"division", node) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_features.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_features.py deleted file mode 100755 index 52630f9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_features.py +++ /dev/null @@ -1,86 +0,0 @@ -u""" -Warn about features that are not present in Python 2.5, giving a message that -points to the earliest version of Python 2.x (or 3.x, if none) that supports it -""" - -from .feature_base import Feature, Features -from lib2to3 import fixer_base - -FEATURES = [ - #(FeatureName, - # FeaturePattern, - # FeatureMinVersion, - #), - (u"memoryview", - u"power < 'memoryview' trailer < '(' any* ')' > any* >", - u"2.7", - ), - (u"numbers", - u"""import_from< 'from' 'numbers' 'import' any* > | - import_name< 'import' ('numbers' dotted_as_names< any* 'numbers' any* >) >""", - u"2.6", - ), - (u"abc", - u"""import_name< 'import' ('abc' dotted_as_names< any* 'abc' any* >) > | - import_from< 'from' 'abc' 'import' any* >""", - u"2.6", - ), - (u"io", - u"""import_name< 'import' ('io' dotted_as_names< any* 'io' any* >) > | - import_from< 'from' 'io' 'import' any* >""", - u"2.6", - ), - (u"bin", - u"power< 'bin' trailer< '(' any* ')' > any* >", - u"2.6", - ), - (u"formatting", - u"power< any trailer< '.' 'format' > trailer< '(' any* ')' > >", - u"2.6", - ), - (u"nonlocal", - u"global_stmt< 'nonlocal' any* >", - u"3.0", - ), - (u"with_traceback", - u"trailer< '.' 
'with_traceback' >", - u"3.0", - ), -] - -class FixFeatures(fixer_base.BaseFix): - - run_order = 9 # Wait until all other fixers have run to check for these - - # To avoid spamming, we only want to warn for each feature once. - features_warned = set() - - # Build features from the list above - features = Features([Feature(name, pattern, version) for \ - name, pattern, version in FEATURES]) - - PATTERN = features.PATTERN - - def match(self, node): - to_ret = super(FixFeatures, self).match(node) - # We want the mapping only to tell us the node's specific information. - try: - del to_ret[u'node'] - except Exception: - # We want it to delete the 'node' from the results - # if it's there, so we don't care if it fails for normal reasons. - pass - return to_ret - - def transform(self, node, results): - for feature_name in results: - if feature_name in self.features_warned: - continue - else: - curr_feature = self.features[feature_name] - if curr_feature.version >= u"3": - fail = self.cannot_convert - else: - fail = self.warning - fail(node, reason=curr_feature.message_text()) - self.features_warned.add(feature_name) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_fullargspec.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_fullargspec.py deleted file mode 100755 index 4bd37e1..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_fullargspec.py +++ /dev/null @@ -1,16 +0,0 @@ -u""" -Fixer for getfullargspec -> getargspec -""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import Name - -warn_msg = u"some of the values returned by getfullargspec are not valid in Python 2 and have no equivalent." - -class FixFullargspec(fixer_base.BaseFix): - - PATTERN = u"'getfullargspec'" - - def transform(self, node, results): - self.warning(node, warn_msg) - return Name(u"getargspec", prefix=node.prefix) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_future_builtins.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_future_builtins.py deleted file mode 100755 index 6849679..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_future_builtins.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Adds this import line: - - from builtins import XYZ - -for each of the functions XYZ that is used in the module. -""" - -from __future__ import unicode_literals - -from lib2to3 import fixer_base -from lib2to3.pygram import python_symbols as syms -from lib2to3.fixer_util import Name, Call, in_special_context - -from libfuturize.fixer_util import touch_import_top - -# All builtins are: -# from future.builtins.iterators import (filter, map, zip) -# from future.builtins.misc import (ascii, chr, hex, input, isinstance, oct, open, round, super) -# from future.types import (bytes, dict, int, range, str) -# We don't need isinstance any more. - -replaced_builtins = '''filter map zip - ascii chr hex input next oct open round super - bytes dict int range str'''.split() - -expression = '|'.join(["name='{0}'".format(name) for name in replaced_builtins]) - - -class FixFutureBuiltins(fixer_base.BaseFix): - BM_compatible = True - run_order = 9 - - # Currently we only match uses as a function. This doesn't match e.g.: - # if isinstance(s, str): - # ... 
- PATTERN = """ - power< - ({0}) trailer< '(' args=[any] ')' > - rest=any* > - """.format(expression) - - def transform(self, node, results): - name = results["name"] - touch_import_top(u'builtins', name.value, node) - # name.replace(Name(u"input", prefix=name.prefix)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_getcwd.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_getcwd.py deleted file mode 100755 index 9b7f002..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_getcwd.py +++ /dev/null @@ -1,26 +0,0 @@ -u""" -Fixer for os.getcwd() -> os.getcwdu(). -Also warns about "from os import getcwd", suggesting the above form. -""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import Name - -class FixGetcwd(fixer_base.BaseFix): - - PATTERN = u""" - power< 'os' trailer< dot='.' name='getcwd' > any* > - | - import_from< 'from' 'os' 'import' bad='getcwd' > - """ - - def transform(self, node, results): - if u"name" in results: - name = results[u"name"] - name.replace(Name(u"getcwdu", prefix=name.prefix)) - elif u"bad" in results: - # Can't convert to getcwdu and then expect to catch every use. - self.cannot_convert(node, u"import os, use os.getcwd() instead.") - return - else: - raise ValueError(u"For some reason, the pattern matcher failed.") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports.py deleted file mode 100755 index 2d6718f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports.py +++ /dev/null @@ -1,112 +0,0 @@ -u""" -Fixer for standard library imports renamed in Python 3 -""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import Name, is_probably_builtin, Newline, does_tree_import -from lib2to3.pygram import python_symbols as syms -from lib2to3.pgen2 import token -from lib2to3.pytree import Node, Leaf - -from libfuturize.fixer_util import touch_import_top -# from ..fixer_util import NameImport - -# used in simple_mapping_to_pattern() -MAPPING = {u"reprlib": u"repr", - u"winreg": u"_winreg", - u"configparser": u"ConfigParser", - u"copyreg": u"copy_reg", - u"queue": u"Queue", - u"socketserver": u"SocketServer", - u"_markupbase": u"markupbase", - u"test.support": u"test.test_support", - u"dbm.bsd": u"dbhash", - u"dbm.ndbm": u"dbm", - u"dbm.dumb": u"dumbdbm", - u"dbm.gnu": u"gdbm", - u"html.parser": u"HTMLParser", - u"html.entities": u"htmlentitydefs", - u"http.client": u"httplib", - u"http.cookies": u"Cookie", - u"http.cookiejar": u"cookielib", -# "tkinter": "Tkinter", - u"tkinter.dialog": u"Dialog", - u"tkinter._fix": u"FixTk", - u"tkinter.scrolledtext": u"ScrolledText", - u"tkinter.tix": u"Tix", - u"tkinter.constants": u"Tkconstants", - u"tkinter.dnd": u"Tkdnd", - u"tkinter.__init__": u"Tkinter", - u"tkinter.colorchooser": u"tkColorChooser", - u"tkinter.commondialog": u"tkCommonDialog", - u"tkinter.font": u"tkFont", - u"tkinter.ttk": u"ttk", - u"tkinter.messagebox": u"tkMessageBox", - u"tkinter.turtle": u"turtle", - u"urllib.robotparser": u"robotparser", - u"xmlrpc.client": u"xmlrpclib", - u"builtins": u"__builtin__", -} - -# generic strings to help build patterns -# these variables mean (with http.client.HTTPConnection as an example): -# name = http -# attr = client -# used = HTTPConnection -# fmt_name 
is a formatted subpattern (simple_name_match or dotted_name_match) - -# helps match 'queue', as in 'from queue import ...' -simple_name_match = u"name='%s'" -# helps match 'client', to be used if client has been imported from http -subname_match = u"attr='%s'" -# helps match 'http.client', as in 'import urllib.request' -dotted_name_match = u"dotted_name=dotted_name< %s '.' %s >" -# helps match 'queue', as in 'queue.Queue(...)' -power_onename_match = u"%s" -# helps match 'http.client', as in 'http.client.HTTPConnection(...)' -power_twoname_match = u"power< %s trailer< '.' %s > any* >" -# helps match 'client.HTTPConnection', if 'client' has been imported from http -power_subname_match = u"power< %s any* >" -# helps match 'from http.client import HTTPConnection' -from_import_match = u"from_import=import_from< 'from' %s 'import' imported=any >" -# helps match 'from http import client' -from_import_submod_match = u"from_import_submod=import_from< 'from' %s 'import' (%s | import_as_name< %s 'as' renamed=any > | import_as_names< any* (%s | import_as_name< %s 'as' renamed=any >) any* > ) >" -# helps match 'import urllib.request' -name_import_match = u"name_import=import_name< 'import' %s > | name_import=import_name< 'import' dotted_as_name< %s 'as' renamed=any > >" -# helps match 'import http.client, winreg' -multiple_name_import_match = u"name_import=import_name< 'import' dotted_as_names< names=any* > >" - -def all_patterns(name): - u""" - Accepts a string and returns a pattern of possible patterns involving that name - Called by simple_mapping_to_pattern for each name in the mapping it receives. - """ - - # i_ denotes an import-like node - # u_ denotes a node that appears to be a usage of the name - if u'.' in name: - name, attr = name.split(u'.', 1) - simple_name = simple_name_match % (name) - simple_attr = subname_match % (attr) - dotted_name = dotted_name_match % (simple_name, simple_attr) - i_from = from_import_match % (dotted_name) - i_from_submod = from_import_submod_match % (simple_name, simple_attr, simple_attr, simple_attr, simple_attr) - i_name = name_import_match % (dotted_name, dotted_name) - u_name = power_twoname_match % (simple_name, simple_attr) - u_subname = power_subname_match % (simple_attr) - return u' | \n'.join((i_name, i_from, i_from_submod, u_name, u_subname)) - else: - simple_name = simple_name_match % (name) - i_name = name_import_match % (simple_name, simple_name) - i_from = from_import_match % (simple_name) - u_name = power_onename_match % (simple_name) - return u' | \n'.join((i_name, i_from, u_name)) - - -class FixImports(fixer_base.BaseFix): - - PATTERN = u' | \n'.join([all_patterns(name) for name in MAPPING]) - PATTERN = u' | \n'.join((PATTERN, multiple_name_import_match)) - - def transform(self, node, results): - touch_import_top(u'future', u'standard_library', node) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports2.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports2.py deleted file mode 100755 index 70444e9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_imports2.py +++ /dev/null @@ -1,174 +0,0 @@ -u""" -Fixer for complicated imports -""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import Name, String, FromImport, Newline, Comma -from libfuturize.fixer_util import touch_import_top - - -TK_BASE_NAMES = (u'ACTIVE', u'ALL', u'ANCHOR', u'ARC',u'BASELINE', u'BEVEL', u'BOTH', - u'BOTTOM', 
u'BROWSE', u'BUTT', u'CASCADE', u'CENTER', u'CHAR', - u'CHECKBUTTON', u'CHORD', u'COMMAND', u'CURRENT', u'DISABLED', - u'DOTBOX', u'E', u'END', u'EW', u'EXCEPTION', u'EXTENDED', u'FALSE', - u'FIRST', u'FLAT', u'GROOVE', u'HIDDEN', u'HORIZONTAL', u'INSERT', - u'INSIDE', u'LAST', u'LEFT', u'MITER', u'MOVETO', u'MULTIPLE', u'N', - u'NE', u'NO', u'NONE', u'NORMAL', u'NS', u'NSEW', u'NUMERIC', u'NW', - u'OFF', u'ON', u'OUTSIDE', u'PAGES', u'PIESLICE', u'PROJECTING', - u'RADIOBUTTON', u'RAISED', u'READABLE', u'RIDGE', u'RIGHT', - u'ROUND', u'S', u'SCROLL', u'SE', u'SEL', u'SEL_FIRST', u'SEL_LAST', - u'SEPARATOR', u'SINGLE', u'SOLID', u'SUNKEN', u'SW', u'StringTypes', - u'TOP', u'TRUE', u'TclVersion', u'TkVersion', u'UNDERLINE', - u'UNITS', u'VERTICAL', u'W', u'WORD', u'WRITABLE', u'X', u'Y', u'YES', - u'wantobjects') - -PY2MODULES = { - u'urllib2' : ( - u'AbstractBasicAuthHandler', u'AbstractDigestAuthHandler', - u'AbstractHTTPHandler', u'BaseHandler', u'CacheFTPHandler', - u'FTPHandler', u'FileHandler', u'HTTPBasicAuthHandler', - u'HTTPCookieProcessor', u'HTTPDefaultErrorHandler', - u'HTTPDigestAuthHandler', u'HTTPError', u'HTTPErrorProcessor', - u'HTTPHandler', u'HTTPPasswordMgr', - u'HTTPPasswordMgrWithDefaultRealm', u'HTTPRedirectHandler', - u'HTTPSHandler', u'OpenerDirector', u'ProxyBasicAuthHandler', - u'ProxyDigestAuthHandler', u'ProxyHandler', u'Request', - u'StringIO', u'URLError', u'UnknownHandler', u'addinfourl', - u'build_opener', u'install_opener', u'parse_http_list', - u'parse_keqv_list', u'randombytes', u'request_host', u'urlopen'), - u'urllib' : ( - u'ContentTooShortError', u'FancyURLopener',u'URLopener', - u'basejoin', u'ftperrors', u'getproxies', - u'getproxies_environment', u'localhost', u'pathname2url', - u'quote', u'quote_plus', u'splitattr', u'splithost', - u'splitnport', u'splitpasswd', u'splitport', u'splitquery', - u'splittag', u'splittype', u'splituser', u'splitvalue', - u'thishost', u'unquote', u'unquote_plus', u'unwrap', - u'url2pathname', u'urlcleanup', u'urlencode', u'urlopen', - u'urlretrieve',), - u'urlparse' : ( - u'parse_qs', u'parse_qsl', u'urldefrag', u'urljoin', - u'urlparse', u'urlsplit', u'urlunparse', u'urlunsplit'), - u'dbm' : ( - u'ndbm', u'gnu', u'dumb'), - u'anydbm' : ( - u'error', u'open'), - u'whichdb' : ( - u'whichdb',), - u'BaseHTTPServer' : ( - u'BaseHTTPRequestHandler', u'HTTPServer'), - u'CGIHTTPServer' : ( - u'CGIHTTPRequestHandler',), - u'SimpleHTTPServer' : ( - u'SimpleHTTPRequestHandler',), - u'FileDialog' : TK_BASE_NAMES + ( - u'FileDialog', u'LoadFileDialog', u'SaveFileDialog', - u'dialogstates', u'test'), - u'tkFileDialog' : ( - u'Directory', u'Open', u'SaveAs', u'_Dialog', u'askdirectory', - u'askopenfile', u'askopenfilename', u'askopenfilenames', - u'askopenfiles', u'asksaveasfile', u'asksaveasfilename'), - u'SimpleDialog' : TK_BASE_NAMES + ( - u'SimpleDialog',), - u'tkSimpleDialog' : TK_BASE_NAMES + ( - u'askfloat', u'askinteger', u'askstring', u'Dialog'), - u'SimpleXMLRPCServer' : ( - u'CGIXMLRPCRequestHandler', u'SimpleXMLRPCDispatcher', - u'SimpleXMLRPCRequestHandler', u'SimpleXMLRPCServer', - u'list_public_methods', u'remove_duplicates', - u'resolve_dotted_attribute'), - u'DocXMLRPCServer' : ( - u'DocCGIXMLRPCRequestHandler', u'DocXMLRPCRequestHandler', - u'DocXMLRPCServer', u'ServerHTMLDoc',u'XMLRPCDocGenerator'), - } - -MAPPING = { u'urllib.request' : - (u'urllib2', u'urllib'), - u'urllib.error' : - (u'urllib2', u'urllib'), - u'urllib.parse' : - (u'urllib2', u'urllib', u'urlparse'), - u'dbm.__init__' : - (u'anydbm', u'whichdb'), 
-            u'http.server' :
-                (u'CGIHTTPServer', u'SimpleHTTPServer', u'BaseHTTPServer'),
-            u'tkinter.filedialog' :
-                (u'tkFileDialog', u'FileDialog'),
-            u'tkinter.simpledialog' :
-                (u'tkSimpleDialog', u'SimpleDialog'),
-            u'xmlrpc.server' :
-                (u'DocXMLRPCServer', u'SimpleXMLRPCServer'),
-            }
-
-# helps match 'http', as in 'from http.server import ...'
-simple_name = u"name='%s'"
-# helps match 'server', as in 'from http.server import ...'
-simple_attr = u"attr='%s'"
-# helps match 'HTTPServer', as in 'from http.server import HTTPServer'
-simple_using = u"using='%s'"
-# helps match 'urllib.request', as in 'import urllib.request'
-dotted_name = u"dotted_name=dotted_name< %s '.' %s >"
-# helps match 'http.server', as in 'http.server.HTTPServer(...)'
-power_twoname = u"pow=power< %s trailer< '.' %s > trailer< '.' using=any > any* >"
-# helps match 'dbm.whichdb', as in 'dbm.whichdb(...)'
-power_onename = u"pow=power< %s trailer< '.' using=any > any* >"
-# helps match 'from http.server import HTTPServer'
-# also helps match 'from http.server import HTTPServer, SimpleHTTPRequestHandler'
-# also helps match 'from http.server import *'
-from_import = u"from_import=import_from< 'from' %s 'import' (import_as_name< using=any 'as' renamed=any> | in_list=import_as_names< using=any* > | using='*' | using=NAME) >"
-# helps match 'import urllib.request'
-name_import = u"name_import=import_name< 'import' (%s | in_list=dotted_as_names< imp_list=any* >) >"
-
-#############
-# WON'T FIX #
-#############
-
-# helps match 'import urllib.request as name'
-name_import_rename = u"name_import_rename=dotted_as_name< %s 'as' renamed=any >"
-# helps match 'from http import server'
-from_import_rename = u"from_import_rename=import_from< 'from' %s 'import' (%s | import_as_name< %s 'as' renamed=any > | in_list=import_as_names< any* (%s | import_as_name< %s 'as' renamed=any >) any* >) >"
-
-
-def all_modules_subpattern():
-    u"""
-    Builds a pattern for all toplevel names
-    (urllib, http, etc)
-    """
-    names_dot_attrs = [mod.split(u".") for mod in MAPPING]
-    ret = u"( " + u" | ".join([dotted_name % (simple_name % (mod[0]),
-                                            simple_attr % (mod[1])) for mod in names_dot_attrs])
-    ret += u" | "
-    ret += u" | ".join([simple_name % (mod[0]) for mod in names_dot_attrs if mod[1] == u"__init__"]) + u" )"
-    return ret
-
-
-def build_import_pattern(mapping1, mapping2):
-    u"""
-    mapping1: A dict mapping py3k modules to all possible py2k replacements
-    mapping2: A dict mapping py2k modules to the things they do
-    This builds a HUGE pattern to match all ways that things can be imported
-    """
-    # py3k: urllib.request, py2k: ('urllib2', 'urllib')
-    yield from_import % (all_modules_subpattern())
-    for py3k, py2k in mapping1.items():
-        name, attr = py3k.split(u'.')
-        s_name = simple_name % (name)
-        s_attr = simple_attr % (attr)
-        d_name = dotted_name % (s_name, s_attr)
-        yield name_import % (d_name)
-        yield power_twoname % (s_name, s_attr)
-        if attr == u'__init__':
-            yield name_import % (s_name)
-            yield power_onename % (s_name)
-        yield name_import_rename % (d_name)
-        yield from_import_rename % (s_name, s_attr, s_attr, s_attr, s_attr)
-
-
-class FixImports2(fixer_base.BaseFix):
-
-    run_order = 4
-
-    PATTERN = u" | \n".join(build_import_pattern(MAPPING, PY2MODULES))
-
-    def transform(self, node, results):
-        touch_import_top(u'future', u'standard_library', node)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_kwargs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_kwargs.py
deleted file mode 100755
index 290f991..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_kwargs.py
+++ /dev/null
@@ -1,147 +0,0 @@
-u"""
-Fixer for Python 3 function parameter syntax
-This fixer is rather sensitive to incorrect py3k syntax.
-"""
-
-# Note: "relevant" parameters are parameters following the first STAR in the list.
-
-from lib2to3 import fixer_base
-from lib2to3.fixer_util import token, String, Newline, Comma, Name
-from libfuturize.fixer_util import indentation, suitify, DoubleStar
-
-_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
-_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
-_else_template = u"else: %(name)s = %(default)s"
-_kwargs_default_name = u"_3to2kwargs"
-
-def gen_params(raw_params):
-    u"""
-    Generator that yields tuples of (name, default_value) for each parameter in the list
-    If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
-    """
-    assert raw_params[0].type == token.STAR and len(raw_params) > 2
-    curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
-    max_idx = len(raw_params)
-    while curr_idx < max_idx:
-        curr_item = raw_params[curr_idx]
-        prev_item = curr_item.prev_sibling
-        if curr_item.type != token.NAME:
-            curr_idx += 1
-            continue
-        if prev_item is not None and prev_item.type == token.DOUBLESTAR:
-            break
-        name = curr_item.value
-        nxt = curr_item.next_sibling
-        if nxt is not None and nxt.type == token.EQUAL:
-            default_value = nxt.next_sibling
-            curr_idx += 2
-        else:
-            default_value = None
-        yield (name, default_value)
-        curr_idx += 1
-
-def remove_params(raw_params, kwargs_default=_kwargs_default_name):
-    u"""
-    Removes all keyword-only args from the params list and a bare star, if any.
-    Does not add the kwargs dict if needed.
-    Returns True if more action is needed, False if not
-    (more action is needed if no kwargs dict exists)
-    """
-    assert raw_params[0].type == token.STAR
-    if raw_params[1].type == token.COMMA:
-        raw_params[0].remove()
-        raw_params[1].remove()
-        kw_params = raw_params[2:]
-    else:
-        kw_params = raw_params[3:]
-    for param in kw_params:
-        if param.type != token.DOUBLESTAR:
-            param.remove()
-        else:
-            return False
-    else:
-        return True
-
-def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
-    u"""
-    Returns string with the name of the kwargs dict if the params after the first star need fixing
-    Otherwise returns empty string
-    """
-    found_kwargs = False
-    needs_fix = False
-
-    for t in raw_params[2:]:
-        if t.type == token.COMMA:
-            # Commas are irrelevant at this stage.
-            continue
-        elif t.type == token.NAME and not found_kwargs:
-            # Keyword-only argument: definitely need to fix.
-            needs_fix = True
-        elif t.type == token.NAME and found_kwargs:
-            # Return 'foobar' of **foobar, if needed.
-            return t.value if needs_fix else u''
-        elif t.type == token.DOUBLESTAR:
-            # Found either '*' from **foobar.
-            found_kwargs = True
-    else:
-        # Never found **foobar. Return a synthetic name, if needed.
-        return kwargs_default if needs_fix else u''
-
-class FixKwargs(fixer_base.BaseFix):
-
-    run_order = 7 # Run after function annotations are removed
-
-    PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
-
-    def transform(self, node, results):
-        params_rawlist = results[u"params"]
-        for i, item in enumerate(params_rawlist):
-            if item.type == token.STAR:
-                params_rawlist = params_rawlist[i:]
-                break
-        else:
-            return
-        # params is guaranteed to be a list starting with *.
-        # if fixing is needed, there will be at least 3 items in this list:
-        # [STAR, COMMA, NAME] is the minimum that we need to worry about.
-        new_kwargs = needs_fixing(params_rawlist)
-        # new_kwargs is the name of the kwargs dictionary.
-        if not new_kwargs:
-            return
-        suitify(node)
-
-        # At this point, params_rawlist is guaranteed to be a list
-        # beginning with a star that includes at least one keyword-only param
-        # e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
-        # [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
-
-        # Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
-        # Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
-        # We need to insert our new stuff before the first_stmt and change the
-        # first_stmt's prefix.
-
-        suite = node.children[4]
-        first_stmt = suite.children[2]
-        ident = indentation(first_stmt)
-
-        for name, default_value in gen_params(params_rawlist):
-            if default_value is None:
-                suite.insert_child(2, Newline())
-                suite.insert_child(2, String(_assign_template %{u'name':name, u'kwargs':new_kwargs}, prefix=ident))
-            else:
-                suite.insert_child(2, Newline())
-                suite.insert_child(2, String(_else_template %{u'name':name, u'default':default_value}, prefix=ident))
-                suite.insert_child(2, Newline())
-                suite.insert_child(2, String(_if_template %{u'assign':_assign_template %{u'name':name, u'kwargs':new_kwargs}, u'name':name, u'kwargs':new_kwargs}, prefix=ident))
-        first_stmt.prefix = ident
-        suite.children[2].prefix = u""
-
-        # Now, we need to fix up the list of params.
-
-        must_add_kwargs = remove_params(params_rawlist)
-        if must_add_kwargs:
-            arglist = results[u'arglist']
-            if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
-                arglist.append_child(Comma())
-            arglist.append_child(DoubleStar(prefix=u" "))
-            arglist.append_child(Name(new_kwargs))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_memoryview.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_memoryview.py
deleted file mode 100755
index a20f6f3..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_memoryview.py
+++ /dev/null
@@ -1,21 +0,0 @@
-u"""
-Fixer for memoryview(s) -> buffer(s).
-Explicit because some memoryview methods are invalid on buffer objects.
-"""
-
-from lib2to3 import fixer_base
-from lib2to3.fixer_util import Name
-
-
-class FixMemoryview(fixer_base.BaseFix):
-
-    explicit = True # User must specify that they want this.
-
-    PATTERN = u"""
-              power< name='memoryview' trailer< '(' [any] ')' >
-              rest=any* >
-              """
-
-    def transform(self, node, results):
-        name = results[u"name"]
-        name.replace(Name(u"buffer", prefix=name.prefix))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_metaclass.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_metaclass.py
deleted file mode 100755
index 52dd1d1..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_metaclass.py
+++ /dev/null
@@ -1,78 +0,0 @@
-u"""
-Fixer for (metaclass=X) -> __metaclass__ = X
-Some semantics (see PEP 3115) may be altered in the translation."""
-
-from lib2to3 import fixer_base
-from lib2to3.fixer_util import Name, syms, Node, Leaf, Newline, find_root
-from lib2to3.pygram import token
-from libfuturize.fixer_util import indentation, suitify
-# from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
-
-def has_metaclass(parent):
-    results = None
-    for node in parent.children:
-        kids = node.children
-        if node.type == syms.argument:
-            if kids[0] == Leaf(token.NAME, u"metaclass") and \
-               kids[1] == Leaf(token.EQUAL, u"=") and \
-               kids[2]:
-                #Hack to avoid "class X(=):" with this case.
-                results = [node] + kids
-                break
-        elif node.type == syms.arglist:
-            # Argument list... loop through it looking for:
-            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
-            for child in node.children:
-                if results: break
-                if child.type == token.COMMA:
-                    #Store the last comma, which precedes the metaclass
-                    comma = child
-                elif type(child) == Node:
-                    meta = equal = name = None
-                    for arg in child.children:
-                        if arg == Leaf(token.NAME, u"metaclass"):
-                            #We have the (metaclass) part
-                            meta = arg
-                        elif meta and arg == Leaf(token.EQUAL, u"="):
-                            #We have the (metaclass=) part
-                            equal = arg
-                        elif meta and equal:
-                            #Here we go, we have (metaclass=X)
-                            name = arg
-                            results = (comma, meta, equal, name)
-                            break
-    return results
-
-
-class FixMetaclass(fixer_base.BaseFix):
-
-    PATTERN = u"""
-    classdef
-    """
-
-    def transform(self, node, results):
-        meta_results = has_metaclass(node)
-        if not meta_results: return
-        for meta in meta_results:
-            meta.remove()
-        target = Leaf(token.NAME, u"__metaclass__")
-        equal = Leaf(token.EQUAL, u"=", prefix=u" ")
-        # meta is the last item in what was returned by has_metaclass(): name
-        name = meta
-        name.prefix = u" "
-        stmt_node = Node(syms.atom, [target, equal, name])
-
-        suitify(node)
-        for item in node.children:
-            if item.type == syms.suite:
-                for stmt in item.children:
-                    if stmt.type == token.INDENT:
-                        # Insert, in reverse order, the statement, a newline,
-                        # and an indent right after the first indented line
-                        loc = item.children.index(stmt) + 1
-                        # Keep consistent indentation form
-                        ident = Leaf(token.INDENT, stmt.value)
-                        item.insert_child(loc, ident)
-                        item.insert_child(loc, Newline())
-                        item.insert_child(loc, stmt_node)
-                        break
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_newstyle.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_newstyle.py
deleted file mode 100755
index cc6b3ad..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_newstyle.py
+++ /dev/null
@@ -1,33 +0,0 @@
-u"""
-Fixer for "class Foo: ..." -> "class Foo(object): ..."
-"""
-
-from lib2to3 import fixer_base
-from lib2to3.fixer_util import LParen, RParen, Name
-
-from libfuturize.fixer_util import touch_import_top
-
-
-def insert_object(node, idx):
-    node.insert_child(idx, RParen())
-    node.insert_child(idx, Name(u"object"))
-    node.insert_child(idx, LParen())
-
-class FixNewstyle(fixer_base.BaseFix):
-
-    # Match:
-    #    class Blah:
-    # and:
-    #    class Blah():
-
-    PATTERN = u"classdef< 'class' NAME ['(' ')'] colon=':' any >"
-
-    def transform(self, node, results):
-        colon = results[u"colon"]
-        idx = node.children.index(colon)
-        if (node.children[idx-2].value == '(' and
-            node.children[idx-1].value == ')'):
-            del node.children[idx-2:idx]
-            idx -= 2
-        insert_object(node, idx)
-        touch_import_top(u'builtins', 'object', node)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_next.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_next.py
deleted file mode 100755
index 9ecb6c0..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_next.py
+++ /dev/null
@@ -1,43 +0,0 @@
-u"""
-Fixer for:
-it.__next__() -> it.next().
-next(it) -> it.next().
-"""
-
-from lib2to3.pgen2 import token
-from lib2to3.pygram import python_symbols as syms
-from lib2to3 import fixer_base
-from lib2to3.fixer_util import Name, Call, find_binding, Attr
-
-bind_warning = u"Calls to builtin next() possibly shadowed by global binding"
-
-
-class FixNext(fixer_base.BaseFix):
-
-    PATTERN = u"""
-    power< base=any+ trailer< '.' attr='__next__' > any* >
-    |
-    power< head='next' trailer< '(' arg=any ')' > any* >
-    |
-    classdef< 'class' base=any+ ':'
-              suite< any*
-                     funcdef< 'def'
-                              attr='__next__'
-                              parameters< '(' NAME ')' > any+ >
-                     any* > >
-    """
-
-    def transform(self, node, results):
-        assert results
-
-        base = results.get(u"base")
-        attr = results.get(u"attr")
-        head = results.get(u"head")
-        arg_ = results.get(u"arg")
-        if arg_:
-            arg = arg_.clone()
-            head.replace(Attr(Name(unicode(arg),prefix=head.prefix),
-                              Name(u"next")))
-            arg_.remove()
-        elif base:
-            attr.replace(Name(u"next", prefix=attr.prefix))
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_printfunction.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_printfunction.py
deleted file mode 100755
index a2a6e08..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_printfunction.py
+++ /dev/null
@@ -1,17 +0,0 @@
-u"""
-Fixer for print: from __future__ import print_function.
-""" - -from lib2to3 import fixer_base -from libfuturize.fixer_util import future_import - -class FixPrintfunction(fixer_base.BaseFix): - - # explicit = True - - PATTERN = u""" - power< 'print' trailer < '(' any* ')' > any* > - """ - - def transform(self, node, results): - future_import(u"print_function", node) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise.py deleted file mode 100755 index 9c9c192..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise.py +++ /dev/null @@ -1,25 +0,0 @@ -u"""Fixer for 'raise E(V).with_traceback(T)' -> 'raise E, V, T'""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import Comma, Node, Leaf, token, syms - -class FixRaise(fixer_base.BaseFix): - - PATTERN = u""" - raise_stmt< 'raise' (power< name=any [trailer< '(' val=any* ')' >] - [trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' >] > | any) ['from' chain=any] >""" - - def transform(self, node, results): - name, val, trc = (results.get(u"name"), results.get(u"val"), results.get(u"trc")) - chain = results.get(u"chain") - if chain is not None: - self.warning(node, u"explicit exception chaining is not supported in Python 2") - chain.prev_sibling.remove() - chain.remove() - if trc is not None: - val = val[0] if val else Leaf(token.NAME, u"None") - val.prefix = trc.prefix = u" " - kids = [Leaf(token.NAME, u"raise"), name.clone(), Comma(), - val.clone(), Comma(), trc.clone()] - raise_stmt = Node(syms.raise_stmt, kids) - node.replace(raise_stmt) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise_.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise_.py deleted file mode 100755 index 0f020c4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_raise_.py +++ /dev/null @@ -1,35 +0,0 @@ -u"""Fixer for - raise E(V).with_traceback(T) - to: - from future.utils import raise_ - ... - raise_(E, V, T) - -TODO: FIXME!! - -""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import Comma, Node, Leaf, token, syms - -class FixRaise(fixer_base.BaseFix): - - PATTERN = u""" - raise_stmt< 'raise' (power< name=any [trailer< '(' val=any* ')' >] - [trailer< '.' 
'with_traceback' > trailer< '(' trc=any ')' >] > | any) ['from' chain=any] >""" - - def transform(self, node, results): - FIXME - name, val, trc = (results.get(u"name"), results.get(u"val"), results.get(u"trc")) - chain = results.get(u"chain") - if chain is not None: - self.warning(node, u"explicit exception chaining is not supported in Python 2") - chain.prev_sibling.remove() - chain.remove() - if trc is not None: - val = val[0] if val else Leaf(token.NAME, u"None") - val.prefix = trc.prefix = u" " - kids = [Leaf(token.NAME, u"raise"), name.clone(), Comma(), - val.clone(), Comma(), trc.clone()] - raise_stmt = Node(syms.raise_stmt, kids) - node.replace(raise_stmt) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_throw.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_throw.py deleted file mode 100755 index c0feed1..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_throw.py +++ /dev/null @@ -1,23 +0,0 @@ -u"""Fixer for 'g.throw(E(V).with_traceback(T))' -> 'g.throw(E, V, T)'""" - -from lib2to3 import fixer_base -from lib2to3.pytree import Node, Leaf -from lib2to3.pgen2 import token -from lib2to3.fixer_util import Comma - -class FixThrow(fixer_base.BaseFix): - - PATTERN = u""" - power< any trailer< '.' 'throw' > - trailer< '(' args=power< exc=any trailer< '(' val=any* ')' > - trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' > > ')' > > - """ - - def transform(self, node, results): - syms = self.syms - exc, val, trc = (results[u"exc"], results[u"val"], results[u"trc"]) - val = val[0] if val else Leaf(token.NAME, u"None") - val.prefix = trc.prefix = u" " - kids = [exc.clone(), Comma(), val.clone(), Comma(), trc.clone()] - args = results[u"args"] - args.children = kids diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_unpacking.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_unpacking.py deleted file mode 100755 index c2d3207..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/fixes/fix_unpacking.py +++ /dev/null @@ -1,120 +0,0 @@ -u""" -Fixer for: -(a,)* *b (,c)* [,] = s -for (a,)* *b (,c)* [,] in d: ... -""" - -from lib2to3 import fixer_base -from itertools import count -from lib2to3.fixer_util import (Assign, Comma, Call, Newline, Name, - Number, token, syms, Node, Leaf) -from libfuturize.fixer_util import indentation, suitify, commatize -# from libfuturize.fixer_util import Assign, Comma, Call, Newline, Name, Number, indentation, suitify, commatize, token, syms, Node, Leaf - -def assignment_source(num_pre, num_post, LISTNAME, ITERNAME): - u""" - Accepts num_pre and num_post, which are counts of values - before and after the starg (not including the starg) - Returns a source fit for Assign() from fixer_util - """ - children = [] - pre = unicode(num_pre) - post = unicode(num_post) - # This code builds the assignment source from lib2to3 tree primitives. - # It's not very readable, but it seems like the most correct way to do it. 
- if num_pre > 0: - pre_part = Node(syms.power, [Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Leaf(token.COLON, u":"), Number(pre)]), Leaf(token.RSQB, u"]")])]) - children.append(pre_part) - children.append(Leaf(token.PLUS, u"+", prefix=u" ")) - main_part = Node(syms.power, [Leaf(token.LSQB, u"[", prefix=u" "), Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Number(pre) if num_pre > 0 else Leaf(1, u""), Leaf(token.COLON, u":"), Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]) if num_post > 0 else Leaf(1, u"")]), Leaf(token.RSQB, u"]"), Leaf(token.RSQB, u"]")])]) - children.append(main_part) - if num_post > 0: - children.append(Leaf(token.PLUS, u"+", prefix=u" ")) - post_part = Node(syms.power, [Name(LISTNAME, prefix=u" "), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]), Leaf(token.COLON, u":")]), Leaf(token.RSQB, u"]")])]) - children.append(post_part) - source = Node(syms.arith_expr, children) - return source - -class FixUnpacking(fixer_base.BaseFix): - - PATTERN = u""" - expl=expr_stmt< testlist_star_expr< - pre=(any ',')* - star_expr< '*' name=NAME > - post=(',' any)* [','] > '=' source=any > | - impl=for_stmt< 'for' lst=exprlist< - pre=(any ',')* - star_expr< '*' name=NAME > - post=(',' any)* [','] > 'in' it=any ':' suite=any>""" - - def fix_explicit_context(self, node, results): - pre, name, post, source = (results.get(n) for n in (u"pre", u"name", u"post", u"source")) - pre = [n.clone() for n in pre if n.type == token.NAME] - name.prefix = u" " - post = [n.clone() for n in post if n.type == token.NAME] - target = [n.clone() for n in commatize(pre + [name.clone()] + post)] - # to make the special-case fix for "*z, = ..." correct with the least - # amount of modification, make the left-side into a guaranteed tuple - target.append(Comma()) - source.prefix = u"" - setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [source.clone()])) - power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME)) - return setup_line, power_line - - def fix_implicit_context(self, node, results): - u""" - Only example of the implicit context is - a for loop, so only fix that. - """ - pre, name, post, it = (results.get(n) for n in (u"pre", u"name", u"post", u"it")) - pre = [n.clone() for n in pre if n.type == token.NAME] - name.prefix = u" " - post = [n.clone() for n in post if n.type == token.NAME] - target = [n.clone() for n in commatize(pre + [name.clone()] + post)] - # to make the special-case fix for "*z, = ..." 
correct with the least - # amount of modification, make the left-side into a guaranteed tuple - target.append(Comma()) - source = it.clone() - source.prefix = u"" - setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [Name(self.ITERNAME)])) - power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME)) - return setup_line, power_line - - def transform(self, node, results): - u""" - a,b,c,d,e,f,*g,h,i = range(100) changes to - _3to2list = list(range(100)) - a,b,c,d,e,f,g,h,i, = _3to2list[:6] + [_3to2list[6:-2]] + _3to2list[-2:] - - and - - for a,b,*c,d,e in iter_of_iters: do_stuff changes to - for _3to2iter in iter_of_iters: - _3to2list = list(_3to2iter) - a,b,c,d,e, = _3to2list[:2] + [_3to2list[2:-2]] + _3to2list[-2:] - do_stuff - """ - self.LISTNAME = self.new_name(u"_3to2list") - self.ITERNAME = self.new_name(u"_3to2iter") - expl, impl = results.get(u"expl"), results.get(u"impl") - if expl is not None: - setup_line, power_line = self.fix_explicit_context(node, results) - setup_line.prefix = expl.prefix - power_line.prefix = indentation(expl.parent) - setup_line.append_child(Newline()) - parent = node.parent - i = node.remove() - parent.insert_child(i, power_line) - parent.insert_child(i, setup_line) - elif impl is not None: - setup_line, power_line = self.fix_implicit_context(node, results) - suitify(node) - suite = [k for k in node.children if k.type == syms.suite][0] - setup_line.prefix = u"" - power_line.prefix = suite.children[1].value - suite.children[2].prefix = indentation(suite.children[2]) - suite.insert_child(2, Newline()) - suite.insert_child(2, power_line) - suite.insert_child(2, Newline()) - suite.insert_child(2, setup_line) - results.get(u"lst").replace(Name(self.ITERNAME, prefix=u" ")) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/main.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/main.py deleted file mode 100755 index 4179174..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/libpasteurize/main.py +++ /dev/null @@ -1,204 +0,0 @@ -""" -pasteurize: automatic conversion of Python 3 code to clean 2/3 code -=================================================================== - -``pasteurize`` attempts to convert existing Python 3 code into source-compatible -Python 2 and 3 code. - -Use it like this on Python 3 code: - - $ pasteurize --verbose mypython3script.py - -This removes any Py3-only syntax (e.g. new metaclasses) and adds these -import lines: - - from __future__ import absolute_import - from __future__ import division - from __future__ import print_function - from __future__ import unicode_literals - from future import standard_library - standard_library.install_hooks() - from builtins import * - -To write changes to the files, use the -w flag. - -It also adds any other wrappers needed for Py2/3 compatibility. - -Note that separate stages are not available (or needed) when converting from -Python 3 with ``pasteurize`` as they are when converting from Python 2 with -``futurize``. - -The --all-imports option forces adding all ``__future__`` imports, -``builtins`` imports, and standard library aliases, even if they don't -seem necessary for the current state of each module. (This can simplify -testing, and can reduce the need to think about Py2 compatibility when editing -the code further.) 
- -""" - -from __future__ import (absolute_import, print_function, unicode_literals) - -import sys -import logging -import optparse -from lib2to3.main import main, warn, StdoutRefactoringTool -from lib2to3 import refactor - -from future import __version__ -from libpasteurize.fixes import fix_names - - -def main(args=None): - """Main program. - - Returns a suggested exit status (0, 1, 2). - """ - # Set up option parser - parser = optparse.OptionParser(usage="pasteurize [options] file|dir ...") - parser.add_option("-V", "--version", action="store_true", - help="Report the version number of pasteurize") - parser.add_option("-a", "--all-imports", action="store_true", - help="Adds all __future__ and future imports to each module") - parser.add_option("-f", "--fix", action="append", default=[], - help="Each FIX specifies a transformation; default: all") - parser.add_option("-j", "--processes", action="store", default=1, - type="int", help="Run 2to3 concurrently") - parser.add_option("-x", "--nofix", action="append", default=[], - help="Prevent a fixer from being run.") - parser.add_option("-l", "--list-fixes", action="store_true", - help="List available transformations") - # parser.add_option("-p", "--print-function", action="store_true", - # help="Modify the grammar so that print() is a function") - parser.add_option("-v", "--verbose", action="store_true", - help="More verbose logging") - parser.add_option("--no-diffs", action="store_true", - help="Don't show diffs of the refactoring") - parser.add_option("-w", "--write", action="store_true", - help="Write back modified files") - parser.add_option("-n", "--nobackups", action="store_true", default=False, - help="Don't write backups for modified files.") - - # Parse command line arguments - refactor_stdin = False - flags = {} - options, args = parser.parse_args(args) - fixer_pkg = 'libpasteurize.fixes' - avail_fixes = fix_names - flags["print_function"] = True - - if not options.write and options.no_diffs: - warn("not writing files and not printing diffs; that's not very useful") - if not options.write and options.nobackups: - parser.error("Can't use -n without -w") - if options.version: - print(__version__) - return 0 - if options.list_fixes: - print("Available transformations for the -f/--fix option:") - for fixname in sorted(avail_fixes): - print(fixname) - if not args: - return 0 - if not args: - print("At least one file or directory argument required.", - file=sys.stderr) - print("Use --help to show usage.", file=sys.stderr) - return 2 - if "-" in args: - refactor_stdin = True - if options.write: - print("Can't write to stdin.", file=sys.stderr) - return 2 - - # Set up logging handler - level = logging.DEBUG if options.verbose else logging.INFO - logging.basicConfig(format='%(name)s: %(message)s', level=level) - - unwanted_fixes = set() - for fix in options.nofix: - if ".fix_" in fix: - unwanted_fixes.add(fix) - else: - # Infer the full module name for the fixer. - # First ensure that no names clash (e.g. - # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah): - found = [f for f in avail_fixes - if f.endswith('fix_{0}'.format(fix))] - if len(found) > 1: - print("Ambiguous fixer name. Choose a fully qualified " - "module name instead from these:\n" + - "\n".join(" " + myf for myf in found), - file=sys.stderr) - return 2 - elif len(found) == 0: - print("Unknown fixer. 
Use --list-fixes or -l for a list.", - file=sys.stderr) - return 2 - unwanted_fixes.add(found[0]) - - extra_fixes = set() - if options.all_imports: - prefix = 'libpasteurize.fixes.' - extra_fixes.add(prefix + 'fix_add_all__future__imports') - extra_fixes.add(prefix + 'fix_add_future_standard_library_import') - extra_fixes.add(prefix + 'fix_add_all_future_builtins') - - explicit = set() - if options.fix: - all_present = False - for fix in options.fix: - if fix == 'all': - all_present = True - else: - if ".fix_" in fix: - explicit.add(fix) - else: - # Infer the full module name for the fixer. - # First ensure that no names clash (e.g. - # lib2to3.fixes.fix_blah and libpasteurize.fixes.fix_blah): - found = [f for f in avail_fixes - if f.endswith('fix_{0}'.format(fix))] - if len(found) > 1: - print("Ambiguous fixer name. Choose a fully qualified " - "module name instead from these:\n" + - "\n".join(" " + myf for myf in found), - file=sys.stderr) - return 2 - elif len(found) == 0: - print("Unknown fixer. Use --list-fixes or -l for a list.", - file=sys.stderr) - return 2 - explicit.add(found[0]) - if len(explicit & unwanted_fixes) > 0: - print("Conflicting usage: the following fixers have been " - "simultaneously requested and disallowed:\n" + - "\n".join(" " + myf for myf in (explicit & unwanted_fixes)), - file=sys.stderr) - return 2 - requested = avail_fixes.union(explicit) if all_present else explicit - else: - requested = avail_fixes.union(explicit) - - fixer_names = requested | extra_fixes - unwanted_fixes - - # Initialize the refactoring tool - rt = StdoutRefactoringTool(sorted(fixer_names), flags, set(), - options.nobackups, not options.no_diffs) - - # Refactor all files and directories passed as arguments - if not rt.errors: - if refactor_stdin: - rt.refactor_stdin() - else: - try: - rt.refactor(args, options.write, None, - options.processes) - except refactor.MultiprocessingUnsupported: - assert options.processes > 1 - print("Sorry, -j isn't " \ - "supported on this platform.", file=sys.stderr) - return 1 - rt.summarize() - - # Return error status (0 if rt.errors is zero) - return int(bool(rt.errors)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/__init__.py old mode 100755 new mode 100644 index 2f150a2..d733921 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/__init__.py @@ -1,8 +1,8 @@ # mako/__init__.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -__version__ = "1.1.0" +__version__ = "1.2.4" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/_ast_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/_ast_util.py old mode 100755 new mode 100644 index 74c0851..9574283 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/_ast_util.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/_ast_util.py @@ -1,5 +1,5 @@ # mako/_ast_util.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ 
-47,7 +47,6 @@ from _ast import UAdd from _ast import USub -from mako.compat import arg_stringname BOOLOP_SYMBOLS = {And: "and", Or: "or"} @@ -94,9 +93,7 @@ def parse(expr, filename="", mode="exec"): def iter_fields(node): """Iterate over all fields of a node, only yielding existing fields.""" - # CPython 2.5 compat - if not hasattr(node, "_fields") or not node._fields: - return + for field in node._fields: try: yield field, getattr(node, field) @@ -104,7 +101,7 @@ def iter_fields(node): pass -class NodeVisitor(object): +class NodeVisitor: """ Walks the abstract syntax tree and call visitor functions for every node @@ -266,10 +263,10 @@ def write_comma(): self.visit(default) if node.vararg is not None: write_comma() - self.write("*" + arg_stringname(node.vararg)) + self.write("*" + node.vararg.arg) if node.kwarg is not None: write_comma() - self.write("**" + arg_stringname(node.kwarg)) + self.write("**" + node.kwarg.arg) def decorators(self, node): for decorator in node.decorator_list: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ast.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ast.py old mode 100755 new mode 100644 index 8f2cf2e..a3f3ee3 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ast.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ast.py @@ -1,5 +1,5 @@ # mako/ast.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -9,12 +9,11 @@ import re -from mako import compat from mako import exceptions from mako import pyparser -class PythonCode(object): +class PythonCode: """represents information about a string containing Python code""" @@ -39,7 +38,7 @@ def __init__(self, code, **exception_kwargs): # - AST is less likely to break with version changes # (for example, the behavior of co_names changed a little bit # in python version 2.5) - if isinstance(code, compat.string_types): + if isinstance(code, str): expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs) else: expr = code @@ -48,7 +47,7 @@ def __init__(self, code, **exception_kwargs): f.visit(expr) -class ArgumentList(object): +class ArgumentList: """parses a fragment of code as a comma-separated list of expressions""" @@ -57,7 +56,7 @@ def __init__(self, code, **exception_kwargs): self.args = [] self.declared_identifiers = set() self.undeclared_identifiers = set() - if isinstance(code, compat.string_types): + if isinstance(code, str): if re.match(r"\S", code) and not re.match(r",\s*$", code): # if theres text and no trailing comma, insure its parsed # as a tuple by adding a trailing comma @@ -88,7 +87,7 @@ def __init__(self, code, **exception_kwargs): if not m: raise exceptions.CompileException( "Fragment '%s' is not a partial control statement" % code, - **exception_kwargs + **exception_kwargs, ) if m.group(3): code = code[: m.start(3)] @@ -97,7 +96,7 @@ def __init__(self, code, **exception_kwargs): code = code + "pass" elif keyword == "try": code = code + "pass\nexcept:pass" - elif keyword == "elif" or keyword == "else": + elif keyword in ["elif", "else"]: code = "if False:pass\n" + code + "pass" elif keyword == "except": code = "try:pass\n" + code + "pass" @@ -106,12 +105,12 @@ def __init__(self, code, **exception_kwargs): else: raise exceptions.CompileException( "Unsupported control keyword: '%s'" % keyword, - 
**exception_kwargs + **exception_kwargs, ) - super(PythonFragment, self).__init__(code, **exception_kwargs) + super().__init__(code, **exception_kwargs) -class FunctionDecl(object): +class FunctionDecl: """function declaration""" @@ -124,13 +123,13 @@ def __init__(self, code, allow_kwargs=True, **exception_kwargs): if not hasattr(self, "funcname"): raise exceptions.CompileException( "Code '%s' is not a function declaration" % code, - **exception_kwargs + **exception_kwargs, ) if not allow_kwargs and self.kwargs: raise exceptions.CompileException( "'**%s' keyword argument not allowed here" % self.kwargnames[-1], - **exception_kwargs + **exception_kwargs, ) def get_argument_expressions(self, as_call=False): @@ -200,6 +199,4 @@ class FunctionArgs(FunctionDecl): """the argument portion of a function declaration""" def __init__(self, code, **kwargs): - super(FunctionArgs, self).__init__( - "def ANON(%s):pass" % code, **kwargs - ) + super().__init__("def ANON(%s):pass" % code, **kwargs) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cache.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cache.py old mode 100755 new mode 100644 index b68b74f..db21bb3 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cache.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cache.py @@ -1,10 +1,9 @@ # mako/cache.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -from mako import compat from mako import util _cache_plugins = util.PluginLoader("mako.cache") @@ -13,7 +12,7 @@ register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl") -class Cache(object): +class Cache: """Represents a data content cache made available to the module space of a specific :class:`.Template` object. 
@@ -66,7 +65,7 @@ class Cache(object):
     def __init__(self, template, *args):
         # check for a stale template calling the
         # constructor
-        if isinstance(template, compat.string_types) and args:
+        if isinstance(template, str) and args:
             return
         self.template = template
         self.id = template.module.__name__
@@ -181,7 +180,7 @@ def _get_cache_kw(self, kw, context):
         return tmpl_kw
 
 
-class CacheImpl(object):
+class CacheImpl:
     """Provide a cache implementation for use by :class:`.Cache`."""
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cmd.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cmd.py
old mode 100755
new mode 100644
index 5d52dfb..93a6733
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cmd.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/cmd.py
@@ -1,5 +1,5 @@
 # mako/cmd.py
-# Copyright 2006-2019 the Mako authors and contributors
+# Copyright 2006-2022 the Mako authors and contributors
 #
 # This module is part of Mako and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -46,11 +46,17 @@ def cmdline(argv=None):
     parser.add_argument(
         "--output-encoding", default=None, help="force output encoding"
     )
+    parser.add_argument(
+        "--output-file",
+        default=None,
+        help="Write to file upon successful render instead of stdout",
+    )
     parser.add_argument("input", nargs="?", default="-")
 
     options = parser.parse_args(argv)
 
     output_encoding = options.output_encoding
+    output_file = options.output_file
 
     if options.input == "-":
         lookup_dirs = options.template_dir or ["."]
@@ -78,11 +84,16 @@ def cmdline(argv=None):
     except:
         _exit()
 
-    kw = dict([varsplit(var) for var in options.var])
+    kw = dict(varsplit(var) for var in options.var)
     try:
-        sys.stdout.write(template.render(**kw))
+        rendered = template.render(**kw)
     except:
         _exit()
+    else:
+        if output_file:
+            open(output_file, "wt", encoding=output_encoding).write(rendered)
+        else:
+            sys.stdout.write(rendered)
 
 
 if __name__ == "__main__":
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/codegen.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/codegen.py
old mode 100755
new mode 100644
index 8f9eef4..d1d2c20
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/codegen.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/codegen.py
@@ -1,5 +1,5 @@
 # mako/codegen.py
-# Copyright 2006-2019 the Mako authors and contributors
+# Copyright 2006-2022 the Mako authors and contributors
 #
 # This module is part of Mako and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -12,7 +12,6 @@ import time
 
 from mako import ast
-from mako import compat
 from mako import exceptions
 from mako import filters
 from mako import parsetree
@@ -25,8 +24,8 @@
 # names which are hardwired into the
 # template and are not accessed via the
 # context itself
-TOPLEVEL_DECLARED = set(["UNDEFINED", "STOP_RENDERING"])
-RESERVED_NAMES = set(["context", "loop"]).union(TOPLEVEL_DECLARED)
+TOPLEVEL_DECLARED = {"UNDEFINED", "STOP_RENDERING"}
+RESERVED_NAMES = {"context", "loop"}.union(TOPLEVEL_DECLARED)
 
 
 def compile(  # noqa
@@ -39,20 +38,12 @@ def compile(  # noqa
     future_imports=None,
     source_encoding=None,
     generate_magic_comment=True,
-    disable_unicode=False,
     strict_undefined=False,
    enable_loop=True,
     reserved_names=frozenset(),
 ):
     """Generate module source code given a parsetree node,
-    uri, and optional source filename"""
-
-    # if on Py2K, push the "source_encoding" string to be
-    # a bytestring itself, as we will be embedding it into
-    # the generated source and we don't want to coerce the
-    # result into a unicode object, in "disable_unicode" mode
-    if not compat.py3k and isinstance(source_encoding, compat.text_type):
-        source_encoding = source_encoding.encode(source_encoding)
+    uri, and optional source filename"""
 
     buf = util.FastEncodingBuffer()
@@ -68,7 +59,6 @@ def compile(  # noqa
         future_imports,
         source_encoding,
         generate_magic_comment,
-        disable_unicode,
         strict_undefined,
         enable_loop,
         reserved_names,
@@ -78,7 +68,7 @@ def compile(  # noqa
     return buf.getvalue()
 
 
-class _CompileContext(object):
+class _CompileContext:
     def __init__(
         self,
         uri,
@@ -89,7 +79,6 @@ def __init__(
         future_imports,
         source_encoding,
         generate_magic_comment,
-        disable_unicode,
         strict_undefined,
         enable_loop,
         reserved_names,
@@ -102,16 +91,15 @@ def __init__(
         self.future_imports = future_imports
         self.source_encoding = source_encoding
         self.generate_magic_comment = generate_magic_comment
-        self.disable_unicode = disable_unicode
        self.strict_undefined = strict_undefined
         self.enable_loop = enable_loop
         self.reserved_names = reserved_names
 
 
-class _GenerateRenderMethod(object):
+class _GenerateRenderMethod:
     """A template visitor object which generates the
-       full module source for a template.
+    full module source for a template.
 
     """
@@ -196,7 +184,7 @@ def write_toplevel(self):
 
         self.compiler.pagetag = None
 
-        class FindTopLevel(object):
+        class FindTopLevel:
             def visitInheritTag(s, node):
                 inherit.append(node)
@@ -392,7 +380,7 @@ def write_namespaces(self, namespaces):
         identifiers = self.compiler.identifiers.branch(node)
         self.in_def = True
 
-        class NSDefVisitor(object):
+        class NSDefVisitor:
             def visitDefTag(s, node):
                 s.visitDefOrBase(node)
@@ -404,7 +392,7 @@ def visitDefOrBase(s, node):
                     raise exceptions.CompileException(
                         "Can't put anonymous blocks inside "
                         "<%namespace>",
-                        **node.exception_kwargs
+                        **node.exception_kwargs,
                     )
                 self.write_inline_def(node, identifiers, nested=False)
                 export.append(node.funcname)
@@ -481,7 +469,7 @@ def write_variable_declares(self, identifiers, toplevel=False, limit=None):
         """
 
         # collection of all defs available to us in this scope
-        comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
+        comp_idents = {c.funcname: c for c in identifiers.defs}
         to_write = set()
 
         # write "context.get()" for all variables we are going to
@@ -714,7 +702,7 @@ def write_cache_decorator(
         toplevel=False,
     ):
         """write a post-function decorator to replace a rendering
-            callable with a cached version of itself."""
+        callable with a cached version of itself."""
 
         self.printer.writeline("__M_%s = %s" % (name, name))
         cachekey = node_or_pagetag.parsed_attributes.get(
@@ -794,8 +782,6 @@ def create_filter_callable(self, args, target, is_expression):
         def locate_encode(name):
             if re.match(r"decode\..+", name):
                 return "filters." + name
-            elif self.compiler.disable_unicode:
-                return filters.NON_UNICODE_ESCAPES.get(name, name)
             else:
                 return filters.DEFAULT_ESCAPES.get(name, name)
@@ -859,11 +845,11 @@ def visitControlLine(self, node):
         # and end control lines, and
         # 3) any control line with no content other than comments
         if not children or (
-            compat.all(
+            all(
                 isinstance(c, (parsetree.Comment, parsetree.ControlLine))
                 for c in children
             )
-            and compat.all(
+            and all(
                 (node.is_ternary(c.keyword) or c.isend)
                 for c in children
                 if isinstance(c, parsetree.ControlLine)
@@ -969,7 +955,7 @@ def visitCallTag(self, node):
 
         self.identifier_stack.append(body_identifiers)
 
-        class DefVisitor(object):
+        class DefVisitor:
             def visitDefTag(s, node):
                 s.visitDefOrBase(node)
@@ -1025,7 +1011,7 @@ def visitDefOrBase(s, node):
         )
 
 
-class _Identifiers(object):
+class _Identifiers:
     """tracks the status of identifier names as template code
     is rendered."""
@@ -1098,7 +1084,7 @@ def __init__(self, compiler, node=None, parent=None, nested=False):
 
     def branch(self, node, **kwargs):
         """create a new Identifiers for a new Node, with
-            this Identifiers as the parent."""
+        this Identifiers as the parent."""
 
         return _Identifiers(self.compiler, node, self, **kwargs)
@@ -1123,7 +1109,7 @@ def __repr__(self):
 
     def check_declared(self, node):
         """update the state of this Identifiers with the undeclared
-            and declared identifiers of the given node."""
+        and declared identifiers of the given node."""
 
         for ident in node.undeclared_identifiers():
             if ident != "context" and ident not in self.declared.union(
@@ -1170,7 +1156,7 @@ def _check_name_exists(self, collection, node):
             raise exceptions.CompileException(
                 "%%def or %%block named '%s' already "
                 "exists in this template." % node.funcname,
-                **node.exception_kwargs
+                **node.exception_kwargs,
             )
 
     def visitDefTag(self, node):
@@ -1200,7 +1186,7 @@ def visitBlockTag(self, node):
             raise exceptions.CompileException(
                 "Named block '%s' not allowed inside of def '%s'"
                 % (node.name, self.node.name),
-                **node.exception_kwargs
+                **node.exception_kwargs,
             )
         elif isinstance(
             self.node, (parsetree.CallTag, parsetree.CallNamespaceTag)
@@ -1208,7 +1194,7 @@ def visitBlockTag(self, node):
             raise exceptions.CompileException(
                 "Named block '%s' not allowed inside of <%%call> tag"
                 % (node.name,),
-                **node.exception_kwargs
+                **node.exception_kwargs,
             )
 
         for ident in node.undeclared_identifiers():
@@ -1265,8 +1251,13 @@ def visitCallTag(self, node):
 
 
 _FOR_LOOP = re.compile(
-    r"^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*"
-    r"(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):"
+    r"^for\s+((?:\(?)\s*"
+    r"(?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*"
+    r"(?:\s*,\s*(?:[A-Za-z_][A-Za-z_0-9]*),??)*\s*(?:\)?)"
+    r"(?:\s*,\s*(?:"
+    r"(?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*"
+    r"(?:\s*,\s*(?:[A-Za-z_][A-Za-z_0-9]*),??)*\s*(?:\)?)"
+    r"),??)*\s*(?:\)?))\s+in\s+(.*):"
 )
@@ -1293,7 +1284,7 @@ def mangle_mako_loop(node, printer):
     return text
 
 
-class LoopVariable(object):
+class LoopVariable:
     """A node visitor which looks for the name 'loop' within
     undeclared identifiers."""
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/compat.py
old mode 100755
new mode 100644
index 4460fde..48df30d
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/compat.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/compat.py
@@ -1,19 +1,17 @@
 # mako/compat.py
-# Copyright 2006-2019 the Mako authors and contributors
+# Copyright
2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import collections +from importlib import util import inspect import sys -py3k = sys.version_info >= (3, 0) -py2k = sys.version_info < (3,) -py27 = sys.version_info >= (2, 7) -jython = sys.platform.startswith("java") win32 = sys.platform.startswith("win") pypy = hasattr(sys, "pypy_version_info") +py38 = sys.version_info >= (3, 8) ArgSpec = collections.namedtuple( "ArgSpec", ["args", "varargs", "keywords", "defaults"] @@ -26,15 +24,15 @@ def inspect_getargspec(func): if inspect.ismethod(func): func = func.__func__ if not inspect.isfunction(func): - raise TypeError("{!r} is not a Python function".format(func)) + raise TypeError(f"{func!r} is not a Python function") co = func.__code__ if not inspect.iscode(co): - raise TypeError("{!r} is not a code object".format(co)) + raise TypeError(f"{co!r} is not a code object") nargs = co.co_argcount names = co.co_varnames - nkwargs = co.co_kwonlyargcount if py3k else 0 + nkwargs = co.co_kwonlyargcount args = list(names[:nargs]) nargs += nkwargs @@ -49,118 +47,30 @@ def inspect_getargspec(func): return ArgSpec(args, varargs, varkw, func.__defaults__) -if py3k: - from io import StringIO - import builtins as compat_builtins - from urllib.parse import quote_plus, unquote_plus - from html.entities import codepoint2name, name2codepoint - - string_types = (str,) - binary_type = bytes - text_type = str - - from io import BytesIO as byte_buffer - - def u(s): - return s - - def b(s): - return s.encode("latin-1") - - def octal(lit): - return eval("0o" + lit) - - -else: - import __builtin__ as compat_builtins # noqa - - try: - from cStringIO import StringIO - except: - from StringIO import StringIO - - byte_buffer = StringIO - - from urllib import quote_plus, unquote_plus # noqa - from htmlentitydefs import codepoint2name, name2codepoint # noqa - - string_types = (basestring,) # noqa - binary_type = str - text_type = unicode # noqa - - def u(s): - return unicode(s, "utf-8") # noqa - - def b(s): - return s - - def octal(lit): - return eval("0" + lit) - - -if py3k: - from importlib import machinery - - def load_module(module_id, path): - return machinery.SourceFileLoader(module_id, path).load_module() - - -else: - import imp - - def load_module(module_id, path): - fp = open(path, "rb") - try: - return imp.load_source(module_id, path, fp) - finally: - fp.close() - - -if py3k: - - def reraise(tp, value, tb=None, cause=None): - if cause is not None: - value.__cause__ = cause - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - -else: - exec( - "def reraise(tp, value, tb=None, cause=None):\n" - " raise tp, value, tb\n" - ) +def load_module(module_id, path): + spec = util.spec_from_file_location(module_id, path) + module = util.module_from_spec(spec) + spec.loader.exec_module(module) + return module def exception_as(): return sys.exc_info()[1] -all = all # noqa - - def exception_name(exc): return exc.__class__.__name__ -################################################ -# cross-compatible metaclass implementation -# Copyright (c) 2010-2012 Benjamin Peterson -def with_metaclass(meta, base=object): - """Create a base class with a metaclass.""" - return meta("%sBase" % meta.__name__, (base,), {}) - - -################################################ +if py38: + from importlib import metadata as importlib_metadata +else: + import importlib_metadata # noqa -def 
arg_stringname(func_arg): - """Gets the string name of a kwarg or vararg - In Python3.4 a function's args are - of _ast.arg type not _ast.name - """ - if hasattr(func_arg, "arg"): - return func_arg.arg +def importlib_metadata_get(group): + ep = importlib_metadata.entry_points() + if hasattr(ep, "select"): + return ep.select(group=group) else: - return str(func_arg) + return ep.get(group, ()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/exceptions.py old mode 100755 new mode 100644 index b6388b1..31c695f --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/exceptions.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/exceptions.py @@ -1,5 +1,5 @@ # mako/exceptions.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -68,7 +68,7 @@ class TopLevelLookupException(TemplateLookupException): pass -class RichTraceback(object): +class RichTraceback: """Pull the current exception from the ``sys`` traceback and extracts Mako-specific template information. @@ -106,7 +106,7 @@ def errorname(self): def _init_message(self): """Find a unicode representation of self.error""" try: - self.message = compat.text_type(self.error) + self.message = str(self.error) except UnicodeError: try: self.message = str(self.error) @@ -114,8 +114,8 @@ def _init_message(self): # Fallback to args as neither unicode nor # str(Exception(u'\xe6')) work in Python < 2.6 self.message = self.error.args[0] - if not isinstance(self.message, compat.text_type): - self.message = compat.text_type(self.message, "ascii", "replace") + if not isinstance(self.message, str): + self.message = str(self.message, "ascii", "replace") def _get_reformatted_records(self, records): for rec in records: @@ -139,8 +139,7 @@ def reverse_records(self): @property def reverse_traceback(self): - """Return the same data as traceback, except in reverse order. 
- """ + """Return the same data as traceback, except in reverse order.""" return list(self._get_reformatted_records(self.reverse_records)) @@ -166,23 +165,10 @@ def _init(self, trcback): module_source = info.code template_source = info.source template_filename = ( - info.template_filename - or info.template_uri - or filename + info.template_filename or info.template_uri or filename ) except KeyError: # A normal .py file (not a Template) - if not compat.py3k: - try: - fp = open(filename, "rb") - encoding = util.parse_encoding(fp) - fp.close() - except IOError: - encoding = None - if encoding: - line = line.decode(encoding) - else: - line = line.decode("ascii", "replace") new_trcback.append( ( filename, @@ -238,13 +224,12 @@ def _init(self, trcback): if new_trcback: try: # A normal .py file (not a Template) - fp = open(new_trcback[-1][0], "rb") - encoding = util.parse_encoding(fp) - if compat.py3k and not encoding: - encoding = "utf-8" - fp.seek(0) - self.source = fp.read() - fp.close() + with open(new_trcback[-1][0], "rb") as fp: + encoding = util.parse_encoding(fp) + if not encoding: + encoding = "utf-8" + fp.seek(0) + self.source = fp.read() if encoding: self.source = self.source.decode(encoding) except IOError: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/autohandler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/autohandler.py old mode 100755 new mode 100644 index 55afb95..5bcfc28 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/autohandler.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/autohandler.py @@ -1,5 +1,5 @@ # ext/autohandler.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/babelplugin.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/babelplugin.py old mode 100755 new mode 100644 index dbe2cd0..907d0b8 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/babelplugin.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/babelplugin.py @@ -1,10 +1,10 @@ # ext/babelplugin.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -"""gettext message extraction via Babel: http://babel.edgewall.org/""" +"""gettext message extraction via Babel: https://pypi.org/project/Babel/""" from babel.messages.extract import extract_python from mako.ext.extract import MessageExtractor @@ -15,12 +15,12 @@ def __init__(self, keywords, comment_tags, options): self.keywords = keywords self.options = options self.config = { - "comment-tags": u" ".join(comment_tags), + "comment-tags": " ".join(comment_tags), "encoding": options.get( "input_encoding", options.get("encoding", None) ), } - super(BabelMakoExtractor, self).__init__() + super().__init__() def __call__(self, fileobj): return self.process_file(fileobj) @@ -54,5 
+54,4 @@ def extract(fileobj, keywords, comment_tags, options): :rtype: ``iterator`` """ extractor = BabelMakoExtractor(keywords, comment_tags, options) - for message in extractor(fileobj): - yield message + yield from extractor(fileobj) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/beaker_cache.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/beaker_cache.py old mode 100755 new mode 100644 index b415c9c..9aa35b0 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/beaker_cache.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/beaker_cache.py @@ -1,5 +1,5 @@ # ext/beaker_cache.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -40,7 +40,7 @@ def __init__(self, cache): _beaker_cache = cache.template.cache_args["manager"] else: _beaker_cache = beaker_cache.CacheManager() - super(BeakerCacheImpl, self).__init__(cache) + super().__init__(cache) def _get_cache(self, **kw): expiretime = kw.pop("timeout", None) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/extract.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/extract.py old mode 100755 new mode 100644 index 8a1bd54..9d33ee1 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/extract.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/extract.py @@ -1,23 +1,25 @@ # ext/extract.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php +from io import BytesIO +from io import StringIO import re -from mako import compat from mako import lexer from mako import parsetree -class MessageExtractor(object): +class MessageExtractor: + use_bytes = True + def process_file(self, fileobj): template_node = lexer.Lexer( fileobj.read(), input_encoding=self.config["encoding"] ).parse() - for extracted in self.extract_nodes(template_node.get_children()): - yield extracted + yield from self.extract_nodes(template_node.get_children()) def extract_nodes(self, nodes): translator_comments = [] @@ -90,7 +92,7 @@ def extract_nodes(self, nodes): comment[1] for comment in translator_comments ] - if isinstance(code, compat.text_type): + if isinstance(code, str) and self.use_bytes: code = code.encode(input_encoding, "backslashreplace") used_translator_comments = False @@ -99,7 +101,10 @@ def extract_nodes(self, nodes): # input string of the input is non-ascii) # Also, because we added it, we have to subtract one from # node.lineno - code = compat.byte_buffer(compat.b("\n") + code) + if self.use_bytes: + code = BytesIO(b"\n" + code) + else: + code = StringIO("\n" + code) for message in self.process_python( code, node.lineno - 1, translator_strings @@ -112,8 +117,7 @@ def extract_nodes(self, nodes): in_translator_comments = False if child_nodes: - for extracted in self.extract_nodes(child_nodes): - yield extracted + yield from self.extract_nodes(child_nodes) @staticmethod def _split_comment(lineno, comment): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/linguaplugin.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/linguaplugin.py old mode 100755 new mode 100644 index 955a5cb..efb04c7 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/linguaplugin.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/linguaplugin.py @@ -1,23 +1,23 @@ # ext/linguaplugin.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php +import contextlib import io from lingua.extractors import Extractor from lingua.extractors import get_extractor from lingua.extractors import Message -from mako import compat from mako.ext.extract import MessageExtractor class LinguaMakoExtractor(Extractor, MessageExtractor): - """Mako templates""" + use_bytes = False extensions = [".mako"] default_config = {"encoding": "utf-8", "comment-tags": ""} @@ -26,21 +26,21 @@ def __call__(self, filename, options, fileobj=None): self.filename = filename self.python_extractor = get_extractor("x.py") if fileobj is None: - fileobj = open(filename, "rb") - return self.process_file(fileobj) + ctx = open(filename, "r") + else: + ctx = contextlib.nullcontext(fileobj) + with ctx as file_: + yield from self.process_file(file_) def process_python(self, code, code_lineno, translator_strings): source = code.getvalue().strip() - if source.endswith(compat.b(":")): - if source in ( - compat.b("try:"), - compat.b("else:"), - ) or source.startswith(compat.b("except")): - source = compat.b("") # Ignore try/except and else - elif source.startswith(compat.b("elif")): + if source.endswith(":"): + if source in ("try:", "else:") or source.startswith("except"): + source = "" # Ignore try/except and else + elif source.startswith("elif"): source = source[2:] # Replace "elif" with "if" - source += compat.b("pass") - code = io.BytesIO(source) + source += "pass" + code = io.StringIO(source) for msg in self.python_extractor( self.filename, self.options, code, code_lineno - 1 ): @@ -50,7 +50,7 @@ def process_python(self, code, code_lineno, translator_strings): msg.msgid, msg.msgid_plural, msg.flags, - compat.u(" ").join(translator_strings + [msg.comment]), + " ".join(translator_strings + [msg.comment]), msg.tcomment, msg.location, ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/preprocessors.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/preprocessors.py old mode 100755 new mode 100644 index 1eeb7c5..80403ec --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/preprocessors.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/preprocessors.py @@ -1,5 +1,5 @@ # ext/preprocessors.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/pygmentplugin.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/pygmentplugin.py old mode 100755 new mode 100644 index 1734ccd..9acbf4c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/pygmentplugin.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/pygmentplugin.py @@ 
-1,5 +1,5 @@ # ext/pygmentplugin.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -25,8 +25,6 @@ from pygments.token import String from pygments.token import Text -from mako import compat - class MakoLexer(RegexLexer): name = "Mako" @@ -108,7 +106,7 @@ class MakoHtmlLexer(DelegatingLexer): aliases = ["html+mako"] def __init__(self, **options): - super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, **options) + super().__init__(HtmlLexer, MakoLexer, **options) class MakoXmlLexer(DelegatingLexer): @@ -116,7 +114,7 @@ class MakoXmlLexer(DelegatingLexer): aliases = ["xml+mako"] def __init__(self, **options): - super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, **options) + super().__init__(XmlLexer, MakoLexer, **options) class MakoJavascriptLexer(DelegatingLexer): @@ -124,9 +122,7 @@ class MakoJavascriptLexer(DelegatingLexer): aliases = ["js+mako", "javascript+mako"] def __init__(self, **options): - super(MakoJavascriptLexer, self).__init__( - JavascriptLexer, MakoLexer, **options - ) + super().__init__(JavascriptLexer, MakoLexer, **options) class MakoCssLexer(DelegatingLexer): @@ -134,7 +130,7 @@ class MakoCssLexer(DelegatingLexer): aliases = ["css+mako"] def __init__(self, **options): - super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, **options) + super().__init__(CssLexer, MakoLexer, **options) pygments_html_formatter = HtmlFormatter( @@ -144,10 +140,7 @@ def __init__(self, **options): def syntax_highlight(filename="", language=None): mako_lexer = MakoLexer() - if compat.py3k: - python_lexer = Python3Lexer() - else: - python_lexer = PythonLexer() + python_lexer = Python3Lexer() if filename.startswith("memory:") or language == "mako": return lambda string: highlight( string, mako_lexer, pygments_html_formatter diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/turbogears.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/turbogears.py old mode 100755 new mode 100644 index fdb7741..2ebf074 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/turbogears.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/ext/turbogears.py @@ -1,5 +1,5 @@ # ext/turbogears.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -9,7 +9,7 @@ from mako.template import Template -class TGPlugin(object): +class TGPlugin: """TurboGears compatible Template Plugin.""" @@ -51,7 +51,7 @@ def load_template(self, templatename, template_string=None): def render( self, info, format="html", fragment=False, template=None # noqa ): - if isinstance(template, compat.string_types): + if isinstance(template, str): template = self.load_template(template) # Load extra vars func if provided diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/filters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/filters.py old mode 100755 new mode 100644 index ba69fdd..af202f3 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/filters.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/filters.py @@ -1,18 +1,19 @@ # mako/filters.py -# 
Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import codecs +from html.entities import codepoint2name +from html.entities import name2codepoint import re +from urllib.parse import quote_plus -from mako import compat -from mako.compat import codepoint2name -from mako.compat import name2codepoint -from mako.compat import quote_plus -from mako.compat import unquote_plus +import markupsafe + +html_escape = markupsafe.escape xml_escapes = { "&": "&amp;", @@ -22,27 +23,6 @@ "'": "&#39;", # also &apos; in html-only } -# XXX: &quot; is valid in HTML and XML -# &apos; is not valid HTML, but is valid XML - - -def legacy_html_escape(s): - """legacy HTML escape for non-unicode mode.""" - s = s.replace("&", "&amp;") - s = s.replace(">", "&gt;") - s = s.replace("<", "&lt;") - s = s.replace('"', "&#34;") - s = s.replace("'", "&#39;") - return s - - -try: - import markupsafe - - html_escape = markupsafe.escape -except ImportError: - html_escape = legacy_html_escape - def xml_escape(string): return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string) @@ -54,31 +34,19 @@ def url_escape(string): return quote_plus(string) -def legacy_url_escape(string): - # convert into a list of octets - return quote_plus(string) - - -def url_unescape(string): - text = unquote_plus(string) - if not is_ascii_str(text): - text = text.decode("utf8") - return text - - def trim(string): return string.strip() -class Decode(object): +class Decode: def __getattr__(self, key): def decode(x): - if isinstance(x, compat.text_type): + if isinstance(x, str): return x - elif not isinstance(x, compat.binary_type): + elif not isinstance(x, bytes): return decode(str(x)) else: - return compat.text_type(x, encoding=key) + return str(x, encoding=key) return decode @@ -86,24 +54,11 @@ def decode(x): decode = Decode() -_ASCII_re = re.compile(r"\A[\x00-\x7f]*\Z") - - -def is_ascii_str(text): - return isinstance(text, str) and _ASCII_re.match(text) - - -################################################################ - - -class XMLEntityEscaper(object): +class XMLEntityEscaper: def __init__(self, codepoint2name, name2codepoint): - self.codepoint2entity = dict( - [ - (c, compat.text_type("&%s;" % n)) - for c, n in codepoint2name.items() - ] - ) + self.codepoint2entity = { + c: str("&%s;" % n) for c, n in codepoint2name.items() + } self.name2codepoint = name2codepoint def escape_entities(self, text): @@ -111,7 +66,7 @@ Only characters corresponding to a named entity are replaced. """ - return compat.text_type(text).translate(self.codepoint2entity) + return str(text).translate(self.codepoint2entity) def __escape(self, m): codepoint = ord(m.group()) @@ -131,9 +86,7 @@ def escape(self, text): The return value is guaranteed to be ASCII. """ - return self.__escapable.sub( - self.__escape, compat.text_type(text) - ).encode("ascii") + return self.__escapable.sub(self.__escape, str(text)).encode("ascii") # XXX: This regexp will not match all valid XML entity names__. # (It punts on details involving involving CombiningChars and Extenders.) @@ -183,37 +136,28 @@ def htmlentityreplace_errors(ex): characters with HTML entities, or, if no HTML entity exists for the character, XML character references:: - >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace') + >>> 'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace') 'The cost was &euro;12.'
""" if isinstance(ex, UnicodeEncodeError): # Handle encoding errors bad_text = ex.object[ex.start : ex.end] text = _html_entities_escaper.escape(bad_text) - return (compat.text_type(text), ex.end) + return (str(text), ex.end) raise ex codecs.register_error("htmlentityreplace", htmlentityreplace_errors) -# TODO: options to make this dynamic per-compilation will be added in a later -# release DEFAULT_ESCAPES = { "x": "filters.xml_escape", "h": "filters.html_escape", "u": "filters.url_escape", "trim": "filters.trim", "entity": "filters.html_entities_escape", - "unicode": "unicode", + "unicode": "str", "decode": "decode", "str": "str", "n": "n", } - -if compat.py3k: - DEFAULT_ESCAPES.update({"unicode": "str"}) - -NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy() -NON_UNICODE_ESCAPES["h"] = "filters.legacy_html_escape" -NON_UNICODE_ESCAPES["u"] = "filters.legacy_url_escape" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lexer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lexer.py old mode 100755 new mode 100644 index dadd663..75182f8 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lexer.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lexer.py @@ -1,5 +1,5 @@ # mako/lexer.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -9,7 +9,6 @@ import codecs import re -from mako import compat from mako import exceptions from mako import parsetree from mako.pygen import adjust_whitespace @@ -17,14 +16,9 @@ _regexp_cache = {} -class Lexer(object): +class Lexer: def __init__( - self, - text, - filename=None, - disable_unicode=False, - input_encoding=None, - preprocessor=None, + self, text, filename=None, input_encoding=None, preprocessor=None ): self.text = text self.filename = filename @@ -36,14 +30,8 @@ def __init__( self.tag = [] self.control_line = [] self.ternary_stack = [] - self.disable_unicode = disable_unicode self.encoding = input_encoding - if compat.py3k and disable_unicode: - raise exceptions.UnsupportedError( - "Mako for Python 3 does not " "support disabling Unicode" - ) - if preprocessor is None: self.preprocessor = [] elif not hasattr(preprocessor, "__iter__"): @@ -66,10 +54,7 @@ def match(self, regexp, flags=None): try: reg = _regexp_cache[(regexp, flags)] except KeyError: - if flags: - reg = re.compile(regexp, flags) - else: - reg = re.compile(regexp) + reg = re.compile(regexp, flags) if flags else re.compile(regexp) _regexp_cache[(regexp, flags)] = reg return self.match_reg(reg) @@ -87,21 +72,13 @@ def match_reg(self, reg): match = reg.match(self.text, self.match_position) if match: (start, end) = match.span() - if end == start: - self.match_position = end + 1 - else: - self.match_position = end + self.match_position = end + 1 if end == start else end self.matched_lineno = self.lineno - lines = re.findall(r"\n", self.text[mp : self.match_position]) cp = mp - 1 - while cp >= 0 and cp < self.textlength and self.text[cp] != "\n": - cp -= 1 + if cp >= 0 and cp < self.textlength: + cp = self.text[: cp + 1].rfind("\n") self.matched_charpos = mp - cp - self.lineno += len(lines) - # print "MATCHED:", match.group(0), "LINE START:", - # self.matched_lineno, "LINE END:", self.lineno - # print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \ - # (match and "TRUE" or "FALSE") + self.lineno += 
self.text[mp : self.match_position].count("\n") return match def parse_until_text(self, watch_nesting, *text): @@ -161,12 +138,15 @@ def append_node(self, nodecls, *args, **kwargs): if self.control_line: control_frame = self.control_line[-1] control_frame.nodes.append(node) - if not ( - isinstance(node, parsetree.ControlLine) - and control_frame.is_ternary(node.keyword) + if ( + not ( + isinstance(node, parsetree.ControlLine) + and control_frame.is_ternary(node.keyword) + ) + and self.ternary_stack + and self.ternary_stack[-1] ): - if self.ternary_stack and self.ternary_stack[-1]: - self.ternary_stack[-1][-1].nodes.append(node) + self.ternary_stack[-1][-1].nodes.append(node) if isinstance(node, parsetree.Tag): if len(self.tag): node.parent = self.tag[-1] @@ -188,20 +168,20 @@ def append_node(self, nodecls, *args, **kwargs): raise exceptions.SyntaxException( "Keyword '%s' not a legal ternary for keyword '%s'" % (node.keyword, self.control_line[-1].keyword), - **self.exception_kwargs + **self.exception_kwargs, ) _coding_re = re.compile(r"#.*coding[:=]\s*([-\w.]+).*\r?\n") def decode_raw_stream(self, text, decode_raw, known_encoding, filename): """given string/unicode or bytes/string, determine encoding - from magic encoding comment, return body as unicode - or raw if decode_raw=False + from magic encoding comment, return body as unicode + or raw if decode_raw=False """ - if isinstance(text, compat.text_type): + if isinstance(text, str): m = self._coding_re.match(text) - encoding = m and m.group(1) or known_encoding or "ascii" + encoding = m and m.group(1) or known_encoding or "utf-8" return encoding, text if text.startswith(codecs.BOM_UTF8): @@ -219,11 +199,7 @@ def decode_raw_stream(self, text, decode_raw, known_encoding, filename): ) else: m = self._coding_re.match(text.decode("utf-8", "ignore")) - if m: - parsed_encoding = m.group(1) - else: - parsed_encoding = known_encoding or "ascii" - + parsed_encoding = m.group(1) if m else known_encoding or "utf-8" if decode_raw: try: text = text.decode(parsed_encoding) @@ -241,7 +217,7 @@ def decode_raw_stream(self, text, decode_raw, known_encoding, filename): def parse(self): self.encoding, self.text = self.decode_raw_stream( - self.text, not self.disable_unicode, self.encoding, self.filename + self.text, True, self.encoding, self.filename ) for preproc in self.preprocessor: @@ -276,12 +252,13 @@ def parse(self): if self.match_position > self.textlength: break - raise exceptions.CompileException("assertion failed") + # TODO: no coverage here + raise exceptions.MakoException("assertion failed") if len(self.tag): raise exceptions.SyntaxException( "Unclosed tag: <%%%s>" % self.tag[-1].keyword, - **self.exception_kwargs + **self.exception_kwargs, ) if len(self.control_line): raise exceptions.SyntaxException( @@ -295,66 +272,69 @@ def parse(self): return self.template def match_tag_start(self): - match = self.match( - r""" + reg = r""" \<% # opening tag ([\w\.\:]+) # keyword - ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \ + ((?:\s+\w+|\s*=\s*|"[^"]*?"|'[^']*?'|\s*,\s*)*) # attrname, = \ # sign, string expression + # comma is for backwards compat + # identified in #366 \s* # more whitespace (/)?> # closing - """, + """ + + match = self.match( + reg, re.I | re.S | re.X, ) - if match: - keyword, attr, isend = match.groups() - self.keyword = keyword - attributes = {} - if attr: - for att in re.findall( - r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr - ): - key, val1, val2 = att - text = val1 or val2 - text = text.replace("\r\n", "\n") - 
attributes[key] = text - self.append_node(parsetree.Tag, keyword, attributes) - if isend: - self.tag.pop() - else: - if keyword == "text": - match = self.match(r"(.*?)(?=\</%text>)", re.S) - if not match: - raise exceptions.SyntaxException( - "Unclosed tag: <%%%s>" % self.tag[-1].keyword, - **self.exception_kwargs - ) - self.append_node(parsetree.Text, match.group(1)) - return self.match_tag_end() - return True - else: + return False + keyword, attr, isend = match.groups() + self.keyword = keyword + attributes = {} + if attr: + for att in re.findall( + r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr + ): + key, val1, val2 = att + text = val1 or val2 + text = text.replace("\r\n", "\n") + attributes[key] = text + self.append_node(parsetree.Tag, keyword, attributes) + if isend: + self.tag.pop() + elif keyword == "text": + match = self.match(r"(.*?)(?=\</%text>)", re.S) + if not match: + raise exceptions.SyntaxException( + "Unclosed tag: <%%%s>" % self.tag[-1].keyword, + **self.exception_kwargs, + ) + self.append_node(parsetree.Text, match.group(1)) + return self.match_tag_end() + return True + def match_tag_end(self): - match = self.match(r"\</%[\t ]*(.+?)[\t ]*>") + match = self.match(r"\</%[\t ]*([^\t ]+?)[\t ]*>") if match: if not len(self.tag): raise exceptions.SyntaxException( "Closing tag without opening tag: </%%%s>" % match.group(1), - **self.exception_kwargs + **self.exception_kwargs, ) elif self.tag[-1].keyword != match.group(1): raise exceptions.SyntaxException( "Closing tag </%%%s> does not match tag: <%%%s>" % (match.group(1), self.tag[-1].keyword), - **self.exception_kwargs + **self.exception_kwargs, ) self.tag.pop() return True @@ -363,15 +343,15 @@ def match_tag_end(self): def match_end(self): match = self.match(r"\Z", re.S) - if match: - string = match.group() - if string: - return string - else: - return True - else: + if not match: return False + string = match.group() + if string: + return string + else: + return True + def match_text(self): match = self.match( r""" @@ -422,63 +402,62 @@ def match_python_block(self): def match_expression(self): match = self.match(r"\${") - if match: - line, pos = self.matched_lineno, self.matched_charpos - text, end = self.parse_until_text(True, r"\|", r"}") - if end == "|": - escapes, end = self.parse_until_text(True, r"}") - else: - escapes = "" - text = text.replace("\r\n", "\n") - self.append_node( - parsetree.Expression, - text, - escapes.strip(), - lineno=line, - pos=pos, - ) - return True - else: + if not match: return False + line, pos = self.matched_lineno, self.matched_charpos + text, end = self.parse_until_text(True, r"\|", r"}") + if end == "|": + escapes, end = self.parse_until_text(True, r"}") + else: + escapes = "" + text = text.replace("\r\n", "\n") + self.append_node( + parsetree.Expression, + text, + escapes.strip(), + lineno=line, + pos=pos, + ) + return True + def match_control_line(self): match = self.match( - r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)" + r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\\r?\n)|[^\r\n])*)" r"(?:\r?\n|\Z)", re.M, ) - if match: - operator = match.group(1) - text = match.group(2) - if operator == "%": - m2 = re.match(r"(end)?(\w+)\s*(.*)", text) - if not m2: + if not match: + return False + + operator = match.group(1) + text = match.group(2) + if operator == "%": + m2 = re.match(r"(end)?(\w+)\s*(.*)", text) + if not m2: + raise exceptions.SyntaxException( + "Invalid control line: '%s'" % text, + **self.exception_kwargs, + ) + isend, keyword = m2.group(1, 2) + isend = isend is not None + + if isend: + if not len(self.control_line): raise
exceptions.SyntaxException( - "Invalid control line: '%s'" % text, - **self.exception_kwargs + "No starting keyword '%s' for '%s'" % (keyword, text), + **self.exception_kwargs, ) - isend, keyword = m2.group(1, 2) - isend = isend is not None - - if isend: - if not len(self.control_line): - raise exceptions.SyntaxException( - "No starting keyword '%s' for '%s'" - % (keyword, text), - **self.exception_kwargs - ) - elif self.control_line[-1].keyword != keyword: - raise exceptions.SyntaxException( - "Keyword '%s' doesn't match keyword '%s'" - % (text, self.control_line[-1].keyword), - **self.exception_kwargs - ) - self.append_node(parsetree.ControlLine, keyword, isend, text) - else: - self.append_node(parsetree.Comment, text) - return True + elif self.control_line[-1].keyword != keyword: + raise exceptions.SyntaxException( + "Keyword '%s' doesn't match keyword '%s'" + % (text, self.control_line[-1].keyword), + **self.exception_kwargs, + ) + self.append_node(parsetree.ControlLine, keyword, isend, text) else: - return False + self.append_node(parsetree.Comment, text) + return True def match_comment(self): """matches the multiline version of a comment""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lookup.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lookup.py old mode 100755 new mode 100644 index 93558b2..f7410ce --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lookup.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/lookup.py @@ -1,5 +1,5 @@ # mako/lookup.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -8,18 +8,14 @@ import posixpath import re import stat +import threading from mako import exceptions from mako import util from mako.template import Template -try: - import threading -except: - import dummy_threading as threading - -class TemplateCollection(object): +class TemplateCollection: """Represent a collection of :class:`.Template` objects, identifiable via URI. @@ -34,7 +30,7 @@ class TemplateCollection(object): :class:`.TemplateCollection` is an abstract class, with the usual default implementation being :class:`.TemplateLookup`. 
- """ + """ def has_template(self, uri): """Return ``True`` if this :class:`.TemplateLookup` is @@ -68,7 +64,7 @@ def get_template(self, uri, relativeto=None): def filename_to_uri(self, uri, filename): """Convert the given ``filename`` to a URI relative to - this :class:`.TemplateCollection`.""" + this :class:`.TemplateCollection`.""" return uri @@ -161,8 +157,6 @@ def __init__( collection_size=-1, format_exceptions=False, error_handler=None, - disable_unicode=False, - bytestring_passthrough=False, output_encoding=None, encoding_errors="strict", cache_args=None, @@ -207,8 +201,6 @@ def __init__( "format_exceptions": format_exceptions, "error_handler": error_handler, "include_error_handler": include_error_handler, - "disable_unicode": disable_unicode, - "bytestring_passthrough": bytestring_passthrough, "output_encoding": output_encoding, "cache_impl": cache_impl, "encoding_errors": encoding_errors, @@ -249,7 +241,7 @@ def get_template(self, uri): return self._check(uri, self._collection[uri]) else: return self._collection[uri] - except KeyError: + except KeyError as e: u = re.sub(r"^\/+", "", uri) for dir_ in self.directories: # make sure the path seperators are posix - os.altsep is empty @@ -260,8 +252,8 @@ def get_template(self, uri): return self._load(srcfile, uri) else: raise exceptions.TopLevelLookupException( - "Cant locate template for uri %r" % uri - ) + "Can't locate template for uri %r" % uri + ) from e def adjust_uri(self, uri, relativeto): """Adjust the given ``uri`` based on the given relative URI.""" @@ -270,20 +262,19 @@ def adjust_uri(self, uri, relativeto): if key in self._uri_cache: return self._uri_cache[key] - if uri[0] != "/": - if relativeto is not None: - v = self._uri_cache[key] = posixpath.join( - posixpath.dirname(relativeto), uri - ) - else: - v = self._uri_cache[key] = "/" + uri - else: + if uri[0] == "/": v = self._uri_cache[key] = uri + elif relativeto is not None: + v = self._uri_cache[key] = posixpath.join( + posixpath.dirname(relativeto), uri + ) + else: + v = self._uri_cache[key] = "/" + uri return v def filename_to_uri(self, filename): """Convert the given ``filename`` to a URI relative to - this :class:`.TemplateCollection`.""" + this :class:`.TemplateCollection`.""" try: return self._uri_cache[filename] @@ -294,7 +285,7 @@ def filename_to_uri(self, filename): def _relativeize(self, filename): """Return the portion of a filename that is 'relative' - to the directories in this lookup. + to the directories in this lookup. 
""" @@ -324,7 +315,7 @@ def _load(self, filename, uri): filename=posixpath.normpath(filename), lookup=self, module_filename=module_filename, - **self.template_args + **self.template_args, ) return template except: @@ -342,16 +333,15 @@ def _check(self, uri, template): try: template_stat = os.stat(template.filename) - if template.module._modified_time < template_stat[stat.ST_MTIME]: - self._collection.pop(uri, None) - return self._load(template.filename, uri) - else: + if template.module._modified_time >= template_stat[stat.ST_MTIME]: return template - except OSError: + self._collection.pop(uri, None) + return self._load(template.filename, uri) + except OSError as e: self._collection.pop(uri, None) raise exceptions.TemplateLookupException( - "Cant locate template for uri %r" % uri - ) + "Can't locate template for uri %r" % uri + ) from e def put_string(self, uri, text): """Place a new :class:`.Template` object into this diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/parsetree.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/parsetree.py old mode 100755 new mode 100644 index 2881da1..c5a12d1 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/parsetree.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/parsetree.py @@ -1,5 +1,5 @@ # mako/parsetree.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -9,13 +9,12 @@ import re from mako import ast -from mako import compat from mako import exceptions from mako import filters from mako import util -class Node(object): +class Node: """base class for a Node in the parse tree.""" @@ -51,7 +50,7 @@ class TemplateNode(Node): """a 'container' node that stores the overall collection of nodes.""" def __init__(self, filename): - super(TemplateNode, self).__init__("", 0, 0, filename) + super().__init__("", 0, 0, filename) self.nodes = [] self.page_attributes = {} @@ -80,7 +79,7 @@ class ControlLine(Node): has_loop_context = False def __init__(self, keyword, isend, text, **kwargs): - super(ControlLine, self).__init__(**kwargs) + super().__init__(**kwargs) self.text = text self.keyword = keyword self.isend = isend @@ -107,11 +106,13 @@ def is_ternary(self, keyword): """return true if the given keyword is a ternary keyword for this ControlLine""" - return keyword in { - "if": set(["else", "elif"]), - "try": set(["except", "finally"]), - "for": set(["else"]), - }.get(self.keyword, []) + cases = { + "if": {"else", "elif"}, + "try": {"except", "finally"}, + "for": {"else"}, + } + + return keyword in cases.get(self.keyword, set()) def __repr__(self): return "ControlLine(%r, %r, %r, %r)" % ( @@ -123,11 +124,10 @@ def __repr__(self): class Text(Node): - """defines plain text in the template.""" def __init__(self, content, **kwargs): - super(Text, self).__init__(**kwargs) + super().__init__(**kwargs) self.content = content def __repr__(self): @@ -135,7 +135,6 @@ def __repr__(self): class Code(Node): - """defines a Python code block, either inline or module level. 
e.g.:: @@ -153,7 +152,7 @@ class Code(Node): """ def __init__(self, text, ismodule, **kwargs): - super(Code, self).__init__(**kwargs) + super().__init__(**kwargs) self.text = text self.ismodule = ismodule self.code = ast.PythonCode(text, **self.exception_kwargs) @@ -173,7 +172,6 @@ def __repr__(self): class Comment(Node): - """defines a comment line. # this is a comment @@ -181,7 +179,7 @@ class Comment(Node): """ def __init__(self, text, **kwargs): - super(Comment, self).__init__(**kwargs) + super().__init__(**kwargs) self.text = text def __repr__(self): @@ -189,7 +187,6 @@ def __repr__(self): class Expression(Node): - """defines an inline expression. ${x+y} @@ -197,7 +194,7 @@ class Expression(Node): """ def __init__(self, text, escapes, **kwargs): - super(Expression, self).__init__(**kwargs) + super().__init__(**kwargs) self.text = text self.escapes = escapes self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs) @@ -210,7 +207,7 @@ def undeclared_identifiers(self): # TODO: make the "filter" shortcut list configurable at parse/gen time return self.code.undeclared_identifiers.union( self.escapes_code.undeclared_identifiers.difference( - set(filters.DEFAULT_ESCAPES.keys()) + filters.DEFAULT_ESCAPES ) ).difference(self.code.declared_identifiers) @@ -223,7 +220,6 @@ def __repr__(self): class _TagMeta(type): - """metaclass to allow Tag to produce a subclass according to its keyword""" @@ -232,7 +228,7 @@ class _TagMeta(type): def __init__(cls, clsname, bases, dict_): if getattr(cls, "__keyword__", None) is not None: cls._classmap[cls.__keyword__] = cls - super(_TagMeta, cls).__init__(clsname, bases, dict_) + super().__init__(clsname, bases, dict_) def __call__(cls, keyword, attributes, **kwargs): if ":" in keyword: @@ -254,7 +250,7 @@ def __call__(cls, keyword, attributes, **kwargs): return type.__call__(cls, keyword, attributes, **kwargs) -class Tag(compat.with_metaclass(_TagMeta, Node)): +class Tag(Node, metaclass=_TagMeta): """abstract base class for tags. e.g.:: @@ -276,7 +272,7 @@ def __init__( expressions, nonexpressions, required, - **kwargs + **kwargs, ): r"""construct a new Tag instance. 
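The change just above, from `class Tag(compat.with_metaclass(_TagMeta, Node))` to `class Tag(Node, metaclass=_TagMeta)`, relies on Python 3's native `metaclass` keyword and lets the patch delete the `with_metaclass()` shim from mako/compat.py (removed earlier in this series) along with parsetree.py's `compat` import. A minimal sketch of why the two spellings are equivalent, assuming only the shim's published three-line body; `TracingMeta`, `OldStyle`, and `NewStyle` are illustrative names, not Mako code:

# Illustrative sketch only; not part of the vendored Mako sources.
def with_metaclass(meta, base=object):
    # The deleted Py2/Py3 shim: instantiate the metaclass once to
    # manufacture an intermediate base class whose type is `meta`.
    return meta("%sBase" % meta.__name__, (base,), {})


class TracingMeta(type):
    """Toy metaclass that records every class it creates."""

    created = []

    def __init__(cls, name, bases, ns):
        TracingMeta.created.append(name)
        super().__init__(name, bases, ns)


class OldStyle(with_metaclass(TracingMeta)):  # pre-patch spelling
    pass


class NewStyle(metaclass=TracingMeta):  # post-patch spelling
    pass


# Both spellings yield a class whose metaclass is TracingMeta; the shim
# merely inserts a synthetic TracingMetaBase into OldStyle's MRO.
assert type(OldStyle) is TracingMeta and type(NewStyle) is TracingMeta
print(TracingMeta.created)  # ['TracingMetaBase', 'OldStyle', 'NewStyle']

The native keyword avoids the synthetic intermediate base class entirely, which is the whole point of dropping the shim on a Python-3-only codebase.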
@@ -297,17 +293,20 @@ def __init__( other arguments passed to the Node superclass (lineno, pos) """ - super(Tag, self).__init__(**kwargs) + super().__init__(**kwargs) self.keyword = keyword self.attributes = attributes self._parse_attributes(expressions, nonexpressions) missing = [r for r in required if r not in self.parsed_attributes] if len(missing): raise exceptions.CompileException( - "Missing attribute(s): %s" - % ",".join([repr(m) for m in missing]), - **self.exception_kwargs + ( + "Missing attribute(s): %s" + % ",".join(repr(m) for m in missing) + ), + **self.exception_kwargs, ) + self.parent = None self.nodes = [] @@ -339,23 +338,22 @@ def _parse_attributes(self, expressions, nonexpressions): code.undeclared_identifiers ) expr.append("(%s)" % m.group(1)) - else: - if x: - expr.append(repr(x)) + elif x: + expr.append(repr(x)) self.parsed_attributes[key] = " + ".join(expr) or repr("") elif key in nonexpressions: if re.search(r"\${.+?}", self.attributes[key]): raise exceptions.CompileException( - "Attibute '%s' in tag '%s' does not allow embedded " + "Attribute '%s' in tag '%s' does not allow embedded " "expressions" % (key, self.keyword), - **self.exception_kwargs + **self.exception_kwargs, ) self.parsed_attributes[key] = repr(self.attributes[key]) else: raise exceptions.CompileException( "Invalid attribute for tag '%s': '%s'" % (self.keyword, key), - **self.exception_kwargs + **self.exception_kwargs, ) self.expression_undeclared_identifiers = undeclared_identifiers @@ -379,13 +377,13 @@ class IncludeTag(Tag): __keyword__ = "include" def __init__(self, keyword, attributes, **kwargs): - super(IncludeTag, self).__init__( + super().__init__( keyword, attributes, ("file", "import", "args"), (), ("file",), - **kwargs + **kwargs, ) self.page_args = ast.PythonCode( "__DUMMY(%s)" % attributes.get("args", ""), **self.exception_kwargs @@ -396,24 +394,22 @@ def declared_identifiers(self): def undeclared_identifiers(self): identifiers = self.page_args.undeclared_identifiers.difference( - set(["__DUMMY"]) + {"__DUMMY"} ).difference(self.page_args.declared_identifiers) - return identifiers.union( - super(IncludeTag, self).undeclared_identifiers() - ) + return identifiers.union(super().undeclared_identifiers()) class NamespaceTag(Tag): __keyword__ = "namespace" def __init__(self, keyword, attributes, **kwargs): - super(NamespaceTag, self).__init__( + super().__init__( keyword, attributes, ("file",), ("name", "inheritable", "import", "module"), (), - **kwargs + **kwargs, ) self.name = attributes.get("name", "__anon_%s" % hex(abs(id(self)))) @@ -421,12 +417,12 @@ def __init__(self, keyword, attributes, **kwargs): raise exceptions.CompileException( "'name' and/or 'import' attributes are required " "for <%namespace>", - **self.exception_kwargs + **self.exception_kwargs, ) if "file" in attributes and "module" in attributes: raise exceptions.CompileException( "<%namespace> may only have one of 'file' or 'module'", - **self.exception_kwargs + **self.exception_kwargs, ) def declared_identifiers(self): @@ -437,9 +433,7 @@ class TextTag(Tag): __keyword__ = "text" def __init__(self, keyword, attributes, **kwargs): - super(TextTag, self).__init__( - keyword, attributes, (), ("filter"), (), **kwargs - ) + super().__init__(keyword, attributes, (), ("filter"), (), **kwargs) self.filter_args = ast.ArgumentList( attributes.get("filter", ""), **self.exception_kwargs ) @@ -458,13 +452,13 @@ def __init__(self, keyword, attributes, **kwargs): c for c in attributes if c.startswith("cache_") ] - super(DefTag, 
self).__init__( + super().__init__( keyword, attributes, expressions, ("name", "filter", "decorator"), ("name",), - **kwargs + **kwargs, ) name = attributes["name"] if re.match(r"^[\w_]+$", name): @@ -521,19 +515,19 @@ def __init__(self, keyword, attributes, **kwargs): c for c in attributes if c.startswith("cache_") ] - super(BlockTag, self).__init__( + super().__init__( keyword, attributes, expressions, ("name", "filter", "decorator"), (), - **kwargs + **kwargs, ) name = attributes.get("name") if name and not re.match(r"^[\w_]+$", name): raise exceptions.CompileException( "%block may not specify an argument signature", - **self.exception_kwargs + **self.exception_kwargs, ) if not name and attributes.get("args", None): raise exceptions.CompileException( @@ -577,7 +571,7 @@ class CallTag(Tag): __keyword__ = "call" def __init__(self, keyword, attributes, **kwargs): - super(CallTag, self).__init__( + super().__init__( keyword, attributes, ("args"), ("expr",), ("expr",), **kwargs ) self.expression = attributes["expr"] @@ -597,26 +591,25 @@ def undeclared_identifiers(self): class CallNamespaceTag(Tag): def __init__(self, namespace, defname, attributes, **kwargs): - super(CallNamespaceTag, self).__init__( + super().__init__( namespace + ":" + defname, attributes, tuple(attributes.keys()) + ("args",), (), (), - **kwargs + **kwargs, ) self.expression = "%s.%s(%s)" % ( namespace, defname, ",".join( - [ - "%s=%s" % (k, v) - for k, v in self.parsed_attributes.items() - if k != "args" - ] + "%s=%s" % (k, v) + for k, v in self.parsed_attributes.items() + if k != "args" ), ) + self.code = ast.PythonCode(self.expression, **self.exception_kwargs) self.body_decl = ast.FunctionArgs( attributes.get("args", ""), **self.exception_kwargs @@ -635,7 +628,7 @@ class InheritTag(Tag): __keyword__ = "inherit" def __init__(self, keyword, attributes, **kwargs): - super(InheritTag, self).__init__( + super().__init__( keyword, attributes, ("file",), (), ("file",), **kwargs ) @@ -651,9 +644,7 @@ def __init__(self, keyword, attributes, **kwargs): "enable_loop", ] + [c for c in attributes if c.startswith("cache_")] - super(PageTag, self).__init__( - keyword, attributes, expressions, (), (), **kwargs - ) + super().__init__(keyword, attributes, expressions, (), (), **kwargs) self.body_decl = ast.FunctionArgs( attributes.get("args", ""), **self.exception_kwargs ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pygen.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pygen.py old mode 100755 new mode 100644 index 603676d..57c6979 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pygen.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pygen.py @@ -1,5 +1,5 @@ # mako/pygen.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -11,7 +11,7 @@ from mako import exceptions -class PythonPrinter(object): +class PythonPrinter: def __init__(self, stream): # indentation counter self.indent = 0 @@ -43,6 +43,15 @@ def __init__(self, stream): # source lines self.source_map = {} + self._re_space_comment = re.compile(r"^\s*#") + self._re_space = re.compile(r"^\s*$") + self._re_indent = re.compile(r":[ \t]*(?:#.*)?$") + self._re_compound = re.compile(r"^\s*(if|try|elif|while|for|with)") + self._re_indent_keyword = re.compile( + 
r"^\s*(def|class|else|elif|except|finally)" + ) + self._re_unindentor = re.compile(r"^\s*(else|elif|except|finally).*\:") + def _update_lineno(self, num): self.lineno += num @@ -86,8 +95,8 @@ def writeline(self, line): if ( line is None - or re.match(r"^\s*#", line) - or re.match(r"^\s*$", line) + or self._re_space_comment.match(line) + or self._re_space.match(line) ): hastext = False else: @@ -96,18 +105,19 @@ def writeline(self, line): is_comment = line and len(line) and line[0] == "#" # see if this line should decrease the indentation level - if not is_comment and (not hastext or self._is_unindentor(line)): - - if self.indent > 0: - self.indent -= 1 - # if the indent_detail stack is empty, the user - # probably put extra closures - the resulting - # module wont compile. - if len(self.indent_detail) == 0: - raise exceptions.SyntaxException( - "Too many whitespace closures" - ) - self.indent_detail.pop() + if ( + not is_comment + and (not hastext or self._is_unindentor(line)) + and self.indent > 0 + ): + self.indent -= 1 + # if the indent_detail stack is empty, the user + # probably put extra closures - the resulting + # module wont compile. + if len(self.indent_detail) == 0: + # TODO: no coverage here + raise exceptions.MakoException("Too many whitespace closures") + self.indent_detail.pop() if line is None: return @@ -120,12 +130,12 @@ def writeline(self, line): # note that a line can both decrase (before printing) and # then increase (after printing) the indentation level. - if re.search(r":[ \t]*(?:#.*)?$", line): + if self._re_indent.search(line): # increment indentation count, and also # keep track of what the keyword was that indented us, # if it is a python compound statement keyword # where we might have to look for an "unindent" keyword - match = re.match(r"^\s*(if|try|elif|while|for|with)", line) + match = self._re_compound.match(line) if match: # its a "compound" keyword, so we will check for "unindentors" indentor = match.group(1) @@ -136,9 +146,7 @@ def writeline(self, line): # its not a "compound" keyword. but lets also # test for valid Python keywords that might be indenting us, # else assume its a non-indenting line - m2 = re.match( - r"^\s*(def|class|else|elif|except|finally)", line - ) + m2 = self._re_indent_keyword.match(line) if m2: self.indent += 1 self.indent_detail.append(indentor) @@ -166,14 +174,11 @@ def _is_unindentor(self, line): # if the current line doesnt have one of the "unindentor" keywords, # return False - match = re.match(r"^\s*(else|elif|except|finally).*\:", line) - if not match: - return False - - # whitespace matches up, we have a compound indentor, + match = self._re_unindentor.match(line) + # if True, whitespace matches up, we have a compound indentor, # and this line has an unindentor, this # is probably good enough - return True + return bool(match) # should we decide that its not good enough, heres # more stuff to check. @@ -195,6 +200,9 @@ def _indent_line(self, line, stripspace=""): stripspace is a string of space that will be truncated from the start of the line before indenting.""" + if stripspace == "": + # Fast path optimization. 
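# With an empty stripspace there is no prefix to strip, so the re.sub
# below would only re-add the indent; returning the concatenation
# directly skips a regex call on every generated source line.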
+ return self.indentstring * self.indent + line return re.sub( r"^%s" % stripspace, self.indentstring * self.indent, line @@ -218,11 +226,7 @@ def _in_multi_line(self, line): current_state = self.backslashed or self.triplequoted - if re.search(r"\\$", line): - self.backslashed = True - else: - self.backslashed = False - + self.backslashed = bool(re.search(r"\\$", line)) triples = len(re.findall(r"\"\"\"|\'\'\'", line)) if triples == 1 or triples % 2 != 0: self.triplequoted = not self.triplequoted diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pyparser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pyparser.py old mode 100755 new mode 100644 index e41c304..d9b090c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pyparser.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/pyparser.py @@ -1,5 +1,5 @@ # mako/pyparser.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -18,22 +18,13 @@ from mako import compat from mako import exceptions from mako import util -from mako.compat import arg_stringname -if compat.py3k: - # words that cannot be assigned to (notably - # smaller than the total keys in __builtins__) - reserved = set(["True", "False", "None", "print"]) +# words that cannot be assigned to (notably +# smaller than the total keys in __builtins__) +reserved = {"True", "False", "None", "print"} - # the "id" attribute on a function node - arg_id = operator.attrgetter("arg") -else: - # words that cannot be assigned to (notably - # smaller than the total keys in __builtins__) - reserved = set(["True", "False", "None"]) - - # the "id" attribute on a function node - arg_id = operator.attrgetter("id") +# the "id" attribute on a function node +arg_id = operator.attrgetter("arg") util.restore__ast(_ast) @@ -43,7 +34,7 @@ def parse(code, mode="exec", **exception_kwargs): try: return _ast_util.parse(code, "<unknown>", mode) - except Exception: + except Exception as e: raise exceptions.SyntaxException( "(%s) %s (%r)" % ( @@ -51,8 +42,8 @@ def parse(code, mode="exec", **exception_kwargs): compat.exception_as(), code[0:50], ), - **exception_kwargs - ) + **exception_kwargs, + ) from e class FindIdentifiers(_ast_util.NodeVisitor): @@ -85,18 +76,13 @@ def visit_Assign(self, node): self.visit(n) self.in_assign_targets = in_a - if compat.py3k: - - # ExceptHandler is in Python 2, but this block only works in - # Python 3 (and is required there) - - def visit_ExceptHandler(self, node): - if node.name is not None: - self._add_declared(node.name) - if node.type is not None: - self.visit(node.type) - for statement in node.body: - self.visit(statement) + def visit_ExceptHandler(self, node): + if node.name is not None: + self._add_declared(node.name) + if node.type is not None: + self.visit(node.type) + for statement in node.body: + self.visit(statement) def visit_Lambda(self, node, *args): self._visit_function(node, True) @@ -108,8 +94,7 @@ def visit_FunctionDef(self, node): def _expand_tuples(self, args): for arg in args: if isinstance(arg, _ast.Tuple): - for n in arg.elts: - yield n + yield from arg.elts else: yield arg @@ -170,15 +155,15 @@ def visit_ImportFrom(self, node): for name in node.names: if name.asname is not None: self._add_declared(name.asname) + elif name.name == "*": + raise exceptions.CompileException(
"'import *' is not supported, since all identifier " + "names must be explicitly declared. Please use the " + "form 'from <modulename> import <name1>, <name2>, " + "...' instead.", + **self.exception_kwargs, + ) else: - if name.name == "*": - raise exceptions.CompileException( - "'import *' is not supported, since all identifier " - "names must be explicitly declared. Please use the " - "form 'from <modulename> import <name1>, <name2>, " - "...' instead.", - **self.exception_kwargs - ) self._add_declared(name.name) @@ -213,27 +198,20 @@ def visit_FunctionDef(self, node): argnames = [arg_id(arg) for arg in node.args.args] if node.args.vararg: - argnames.append(arg_stringname(node.args.vararg)) + argnames.append(node.args.vararg.arg) - if compat.py2k: - # kw-only args don't exist in Python 2 - kwargnames = [] - else: - kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs] + kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs] if node.args.kwarg: - kwargnames.append(arg_stringname(node.args.kwarg)) + kwargnames.append(node.args.kwarg.arg) self.listener.argnames = argnames self.listener.defaults = node.args.defaults # ast self.listener.kwargnames = kwargnames - if compat.py2k: - self.listener.kwdefaults = [] - else: - self.listener.kwdefaults = node.args.kw_defaults + self.listener.kwdefaults = node.args.kw_defaults self.listener.varargs = node.args.vararg self.listener.kwargs = node.args.kwarg -class ExpressionGenerator(object): +class ExpressionGenerator: def __init__(self, astnode): self.generator = _ast_util.SourceGenerator(" " * 4) self.generator.visit(astnode) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/runtime.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/runtime.py old mode 100755 new mode 100644 index 0e7149b..6d7fa68 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/runtime.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/runtime.py @@ -1,5 +1,5 @@ # mako/runtime.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2020 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -7,16 +7,16 @@ """provides runtime services for templates, including Context, Namespace, and various helper functions.""" +import builtins import functools import sys from mako import compat from mako import exceptions from mako import util -from mako.compat import compat_builtins -class Context(object): +class Context: """Provides runtime namespace, output buffer, and various callstacks for templates. @@ -24,7 +24,7 @@ class Context(object): See :ref:`runtime_toplevel` for detail on the usage of :class:`.Context`.
- """ + """ def __init__(self, buffer, **data): self._buffer_stack = [buffer] @@ -103,7 +103,7 @@ def __getitem__(self, key): if key in self._data: return self._data[key] else: - return compat_builtins.__dict__[key] + return builtins.__dict__[key] def _push_writer(self): """push a capturing buffer onto this Context and return @@ -135,7 +135,7 @@ def _pop_buffer(self): def get(self, key, default=None): """Return a value from this :class:`.Context`.""" - return self._data.get(key, compat_builtins.__dict__.get(key, default)) + return self._data.get(key, builtins.__dict__.get(key, default)) def write(self, string): """Write a string to this :class:`.Context` object's @@ -216,7 +216,7 @@ def _pop_frame(self): self.nextcaller = self.pop() -class Undefined(object): +class Undefined: """Represents an undefined value in a template. @@ -240,7 +240,7 @@ def __bool__(self): STOP_RENDERING = "" -class LoopStack(object): +class LoopStack: """a stack for LoopContexts that implements the context manager protocol to automatically pop off the top of the stack on context exit @@ -280,7 +280,7 @@ def __iter__(self): return iter(self._top) -class LoopContext(object): +class LoopContext: """A magic loop variable. Automatically accessible in any ``% for`` block. @@ -339,14 +339,13 @@ def odd(self): return bool(self.index % 2) def cycle(self, *values): - """Cycle through values as the loop progresses. - """ + """Cycle through values as the loop progresses.""" if not values: raise ValueError("You must provide values to cycle through") return values[self.index % len(values)] -class _NSAttr(object): +class _NSAttr: def __init__(self, parent): self.__parent = parent @@ -360,22 +359,22 @@ def __getattr__(self, key): raise AttributeError(key) -class Namespace(object): +class Namespace: """Provides access to collections of rendering methods, which - can be local, from other templates, or from imported modules. + can be local, from other templates, or from imported modules. - To access a particular rendering method referenced by a - :class:`.Namespace`, use plain attribute access: + To access a particular rendering method referenced by a + :class:`.Namespace`, use plain attribute access: - .. sourcecode:: mako + .. sourcecode:: mako - ${some_namespace.foo(x, y, z)} + ${some_namespace.foo(x, y, z)} - :class:`.Namespace` also contains several built-in attributes - described here. + :class:`.Namespace` also contains several built-in attributes + described here. - """ + """ def __init__( self, @@ -390,7 +389,7 @@ def __init__( self.context = context self.inherits = inherits if callables is not None: - self.callables = dict([(c.__name__, c) for c in callables]) + self.callables = {c.__name__: c for c in callables} callables = () @@ -482,15 +481,14 @@ def get_namespace(self, uri): key = (self, uri) if key in self.context.namespaces: return self.context.namespaces[key] - else: - ns = TemplateNamespace( - uri, - self.context._copy(), - templateuri=uri, - calling_uri=self._templateuri, - ) - self.context.namespaces[key] = ns - return ns + ns = TemplateNamespace( + uri, + self.context._copy(), + templateuri=uri, + calling_uri=self._templateuri, + ) + self.context.namespaces[key] = ns + return ns def get_template(self, uri): """Return a :class:`.Template` from the given ``uri``. 
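The runtime.py hunks above switch Context's fallback lookups from mako.compat's compat_builtins to the stdlib builtins module directly. A minimal sketch of the resulting resolution order (assumes Mako 1.2.x, where Context is importable from mako.runtime; the buffer and data names here are illustrative only):

    from io import StringIO
    from mako.runtime import Context

    ctx = Context(StringIO(), user="ed")
    assert ctx.get("user") == "ed"      # 1. data passed to the Context
    assert ctx.get("len") is len        # 2. fallback to Python builtins
    assert ctx.get("nope", 42) == 42    # 3. caller-supplied default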
@@ -574,7 +572,7 @@ def __init__( self.context = context self.inherits = inherits if callables is not None: - self.callables = dict([(c.__name__, c) for c in callables]) + self.callables = {c.__name__: c for c in callables} if templateuri is not None: self.template = _lookup_template(context, templateuri, calling_uri) @@ -666,7 +664,7 @@ def __init__( self.context = context self.inherits = inherits if callables is not None: - self.callables = dict([(c.__name__, c) for c in callables]) + self.callables = {c.__name__: c for c in callables} mod = __import__(module) for token in module.split(".")[1:]: @@ -790,7 +788,7 @@ def _include_file(context, uri, calling_uri, **kwargs): except Exception: result = template.include_error_handler(ctx, compat.exception_as()) if not result: - compat.reraise(*sys.exc_info()) + raise else: callable_(ctx, **kwargs) @@ -837,8 +835,10 @@ def _lookup_template(context, uri, relativeto): uri = lookup.adjust_uri(uri, relativeto) try: return lookup.get_template(uri) - except exceptions.TopLevelLookupException: - raise exceptions.TemplateLookupException(str(compat.exception_as())) + except exceptions.TopLevelLookupException as e: + raise exceptions.TemplateLookupException( + str(compat.exception_as()) + ) from e def _populate_self_namespace(context, template, self_ns=None): @@ -862,14 +862,10 @@ def _render(template, callable_, args, data, as_unicode=False): output of the given template and template callable.""" if as_unicode: - buf = util.FastEncodingBuffer(as_unicode=True) - elif template.bytestring_passthrough: - buf = compat.StringIO() + buf = util.FastEncodingBuffer() else: buf = util.FastEncodingBuffer( - as_unicode=as_unicode, - encoding=template.output_encoding, - errors=template.encoding_errors, + encoding=template.output_encoding, errors=template.encoding_errors ) context = Context(buf, **data) context._outputting_as_unicode = as_unicode @@ -880,7 +876,7 @@ def _render(template, callable_, args, data, as_unicode=False): callable_, context, *args, - **_kwargs_for_callable(callable_, data) + **_kwargs_for_callable(callable_, data), ) return context._pop_buffer().getvalue() @@ -951,13 +947,15 @@ def _render_error(template, context, error): if template.error_handler: result = template.error_handler(context, error) if not result: - compat.reraise(*sys.exc_info()) + tp, value, tb = sys.exc_info() + if value and tb: + raise value.with_traceback(tb) + else: + raise error else: error_template = exceptions.html_error_template() if context._outputting_as_unicode: - context._buffer_stack[:] = [ - util.FastEncodingBuffer(as_unicode=True) - ] + context._buffer_stack[:] = [util.FastEncodingBuffer()] else: context._buffer_stack[:] = [ util.FastEncodingBuffer( diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/template.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/template.py old mode 100755 new mode 100644 index 937d15b..8c70731 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/template.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/template.py @@ -1,5 +1,5 @@ # mako/template.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -12,7 +12,6 @@ import re import shutil import stat -import sys import tempfile import types import weakref @@ -26,7 +25,7 @@ from mako.lexer import 
Lexer -class Template(object): +class Template: r"""Represents a compiled template. @@ -54,17 +53,6 @@ class Template(object): of return-valued ``%def``\ s "opt out" of that filtering via passing special attributes or objects. - :param bytestring_passthrough: When ``True``, and ``output_encoding`` is - set to ``None``, and :meth:`.Template.render` is used to render, - the `StringIO` or `cStringIO` buffer will be used instead of the - default "fast" buffer. This allows raw bytestrings in the - output stream, such as in expressions, to pass straight - through to the buffer. This flag is forced - to ``True`` if ``disable_unicode`` is also configured. - - .. versionadded:: 0.4 - Added to provide the same behavior as that of the previous series. - :param cache_args: Dictionary of cache configuration arguments that will be passed to the :class:`.CacheImpl`. See :ref:`caching_toplevel`. @@ -95,9 +83,6 @@ class Template(object): :param default_filters: List of string filter names that will be applied to all expressions. See :ref:`filtering_default_filters`. - :param disable_unicode: Disables all awareness of Python Unicode - objects. See :ref:`unicode_disabled`. - :param enable_loop: When ``True``, enable the ``loop`` context variable. This can be set to ``False`` to support templates that may be making usage of the name "``loop``". Individual templates can @@ -256,9 +241,7 @@ def __init__( cache_url=None, module_filename=None, input_encoding=None, - disable_unicode=False, module_writer=None, - bytestring_passthrough=False, default_filters=None, buffer_filters=(), strict_undefined=False, @@ -295,26 +278,12 @@ def __init__( self.input_encoding = input_encoding self.output_encoding = output_encoding self.encoding_errors = encoding_errors - self.disable_unicode = disable_unicode - self.bytestring_passthrough = bytestring_passthrough or disable_unicode self.enable_loop = enable_loop self.strict_undefined = strict_undefined self.module_writer = module_writer - if compat.py3k and disable_unicode: - raise exceptions.UnsupportedError( - "Mako for Python 3 does not " "support disabling Unicode" - ) - elif output_encoding and disable_unicode: - raise exceptions.UnsupportedError( - "output_encoding must be set to " - "None when disable_unicode is used." 
- ) if default_filters is None: - if compat.py3k or self.disable_unicode: - self.default_filters = ["str"] - else: - self.default_filters = ["unicode"] + self.default_filters = ["str"] else: self.default_filters = default_filters self.buffer_filters = buffer_filters @@ -388,11 +357,7 @@ def _setup_cache_args( ): self.cache_impl = cache_impl self.cache_enabled = cache_enabled - if cache_args: - self.cache_args = cache_args - else: - self.cache_args = {} - + self.cache_args = cache_args or {} # transfer deprecated cache_* args if cache_type: self.cache_args["type"] = cache_type @@ -414,14 +379,12 @@ def _compile_from_file(self, path, filename): self, data, filename, path, self.module_writer ) module = compat.load_module(self.module_id, path) - del sys.modules[self.module_id] if module._magic_number != codegen.MAGIC_NUMBER: data = util.read_file(filename) _compile_module_file( self, data, filename, path, self.module_writer ) module = compat.load_module(self.module_id, path) - del sys.modules[self.module_id] ModuleInfo(module, path, self, filename, None, None, None) else: # template filename and no module directory, compile code @@ -466,7 +429,7 @@ def render(self, *args, **data): If the template specifies an output encoding, the string will be encoded accordingly, else the output is raw (raw - output uses `cStringIO` and can't handle multibyte + output uses `StringIO` and can't handle multibyte characters). A :class:`.Context` object is created corresponding to the given data. Arguments that are explicitly declared by this template's internal rendering method are also @@ -520,17 +483,17 @@ class ModuleTemplate(Template): """A Template which is constructed given an existing Python module. - e.g.:: + e.g.:: - t = Template("this is a template") - f = file("mymodule.py", "w") - f.write(t.code) - f.close() + t = Template("this is a template") + f = file("mymodule.py", "w") + f.write(t.code) + f.close() - import mymodule + import mymodule - t = ModuleTemplate(mymodule) - print t.render() + t = ModuleTemplate(mymodule) + print(t.render()) """ @@ -544,8 +507,6 @@ def __init__( template_source=None, output_encoding=None, encoding_errors="strict", - disable_unicode=False, - bytestring_passthrough=False, format_exceptions=False, error_handler=None, lookup=None, @@ -562,20 +523,8 @@ def __init__( self.input_encoding = module._source_encoding self.output_encoding = output_encoding self.encoding_errors = encoding_errors - self.disable_unicode = disable_unicode - self.bytestring_passthrough = bytestring_passthrough or disable_unicode self.enable_loop = module._enable_loop - if compat.py3k and disable_unicode: - raise exceptions.UnsupportedError( - "Mako for Python 3 does not " "support disabling Unicode" - ) - elif output_encoding and disable_unicode: - raise exceptions.UnsupportedError( - "output_encoding must be set to " - "None when disable_unicode is used." - ) - self.module = module self.filename = template_filename ModuleInfo( @@ -619,19 +568,18 @@ def __init__(self, parent, callable_): self.include_error_handler = parent.include_error_handler self.enable_loop = parent.enable_loop self.lookup = parent.lookup - self.bytestring_passthrough = parent.bytestring_passthrough def get_def(self, name): return self.parent.get_def(name) -class ModuleInfo(object): +class ModuleInfo: """Stores information about a module currently loaded into memory, provides reverse lookups of template source, module source code based on a module's identifier. 
- """ + """ _modules = weakref.WeakValueDictionary() @@ -661,9 +609,9 @@ def get_module_source_metadata(cls, module_source, full_line_map=False): r"__M_BEGIN_METADATA(.+?)__M_END_METADATA", module_source, re.S ).group(1) source_map = json.loads(source_map) - source_map["line_map"] = dict( - (int(k), int(v)) for k, v in source_map["line_map"].items() - ) + source_map["line_map"] = { + int(k): int(v) for k, v in source_map["line_map"].items() + } if full_line_map: f_line_map = source_map["full_line_map"] = [] line_map = source_map["line_map"] @@ -684,28 +632,25 @@ def code(self): @property def source(self): - if self.template_source is not None: - if self.module._source_encoding and not isinstance( - self.template_source, compat.text_type - ): - return self.template_source.decode( - self.module._source_encoding - ) - else: - return self.template_source - else: + if self.template_source is None: data = util.read_file(self.template_filename) if self.module._source_encoding: return data.decode(self.module._source_encoding) else: return data + elif self.module._source_encoding and not isinstance( + self.template_source, str + ): + return self.template_source.decode(self.module._source_encoding) + else: + return self.template_source + def _compile(template, text, filename, generate_magic_comment): lexer = template.lexer_cls( text, filename, - disable_unicode=template.disable_unicode, input_encoding=template.input_encoding, preprocessor=template.preprocessor, ) @@ -720,7 +665,6 @@ def _compile(template, text, filename, generate_magic_comment): future_imports=template.future_imports, source_encoding=lexer.encoding, generate_magic_comment=generate_magic_comment, - disable_unicode=template.disable_unicode, strict_undefined=template.strict_undefined, enable_loop=template.enable_loop, reserved_names=template.reserved_names, @@ -731,15 +675,10 @@ def _compile(template, text, filename, generate_magic_comment): def _compile_text(template, text, filename): identifier = template.module_id source, lexer = _compile( - template, - text, - filename, - generate_magic_comment=template.disable_unicode, + template, text, filename, generate_magic_comment=False ) cid = identifier - if not compat.py3k and isinstance(cid, compat.text_type): - cid = cid.encode() module = types.ModuleType(cid) code = compile(source, cid, "exec") @@ -753,7 +692,7 @@ def _compile_module_file(template, text, filename, outputpath, module_writer): template, text, filename, generate_magic_comment=True ) - if isinstance(source, compat.text_type): + if isinstance(source, str): source = source.encode(lexer.encoding or "ascii") if module_writer: @@ -770,10 +709,7 @@ def _compile_module_file(template, text, filename, outputpath, module_writer): def _get_module_info_from_callable(callable_): - if compat.py3k: - return _get_module_info(callable_.__globals__["__name__"]) - else: - return _get_module_info(callable_.func_globals["__name__"]) + return _get_module_info(callable_.__globals__["__name__"]) def _get_module_info(filename): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/_config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/_config.py new file mode 100644 index 0000000..4ee3d0a --- /dev/null +++ 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/_config.py @@ -0,0 +1,128 @@ +import configparser +import dataclasses +from dataclasses import dataclass +from pathlib import Path +from typing import Callable +from typing import ClassVar +from typing import Optional +from typing import Union + +from .helpers import make_path + + +class ConfigError(BaseException): + pass + + +class MissingConfig(ConfigError): + pass + + +class MissingConfigSection(ConfigError): + pass + + +class MissingConfigItem(ConfigError): + pass + + +class ConfigValueTypeError(ConfigError): + pass + + +class _GetterDispatch: + def __init__(self, initialdata, default_getter: Callable): + self.default_getter = default_getter + self.data = initialdata + + def get_fn_for_type(self, type_): + return self.data.get(type_, self.default_getter) + + def get_typed_value(self, type_, name): + get_fn = self.get_fn_for_type(type_) + return get_fn(name) + + +def _parse_cfg_file(filespec: Union[Path, str]): + cfg = configparser.ConfigParser() + try: + filepath = make_path(filespec, check_exists=True) + except FileNotFoundError as e: + raise MissingConfig(f"No config file found at {filespec}") from e + else: + with open(filepath, encoding="utf-8") as f: + cfg.read_file(f) + return cfg + + +def _build_getter(cfg_obj, cfg_section, method, converter=None): + def caller(option, **kwargs): + try: + rv = getattr(cfg_obj, method)(cfg_section, option, **kwargs) + except configparser.NoSectionError as nse: + raise MissingConfigSection( + f"No config section named {cfg_section}" + ) from nse + except configparser.NoOptionError as noe: + raise MissingConfigItem(f"No config item for {option}") from noe + except ValueError as ve: + # ConfigParser.getboolean, .getint, .getfloat raise ValueError + # on bad types + raise ConfigValueTypeError( + f"Wrong value type for {option}" + ) from ve + else: + if converter: + try: + rv = converter(rv) + except Exception as e: + raise ConfigValueTypeError( + f"Wrong value type for {option}" + ) from e + return rv + + return caller + + +def _build_getter_dispatch(cfg_obj, cfg_section, converters=None): + converters = converters or {} + + default_getter = _build_getter(cfg_obj, cfg_section, "get") + + # support ConfigParser builtins + getters = { + int: _build_getter(cfg_obj, cfg_section, "getint"), + bool: _build_getter(cfg_obj, cfg_section, "getboolean"), + float: _build_getter(cfg_obj, cfg_section, "getfloat"), + str: default_getter, + } + + # use ConfigParser.get and convert value + getters.update( + { + type_: _build_getter( + cfg_obj, cfg_section, "get", converter=converter_fn + ) + for type_, converter_fn in converters.items() + } + ) + + return _GetterDispatch(getters, default_getter) + + +@dataclass +class ReadsCfg: + section_header: ClassVar[str] + converters: ClassVar[Optional[dict]] = None + + @classmethod + def from_cfg_file(cls, filespec: Union[Path, str]): + cfg = _parse_cfg_file(filespec) + dispatch = _build_getter_dispatch( + cfg, cls.section_header, converters=cls.converters + ) + kwargs = { + field.name: dispatch.get_typed_value(field.type, field.name) + for field in dataclasses.fields(cls) + } + return cls(**kwargs) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/assertions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/assertions.py new file mode 100644 index 0000000..14ea635 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/assertions.py @@ 
-0,0 +1,167 @@ +import contextlib +import re +import sys + + +def eq_(a, b, msg=None): + """Assert a == b, with repr messaging on failure.""" + assert a == b, msg or "%r != %r" % (a, b) + + +def ne_(a, b, msg=None): + """Assert a != b, with repr messaging on failure.""" + assert a != b, msg or "%r == %r" % (a, b) + + +def in_(a, b, msg=None): + """Assert a in b, with repr messaging on failure.""" + assert a in b, msg or "%r not in %r" % (a, b) + + +def not_in(a, b, msg=None): + """Assert a in not b, with repr messaging on failure.""" + assert a not in b, msg or "%r is in %r" % (a, b) + + +def _assert_proper_exception_context(exception): + """assert that any exception we're catching does not have a __context__ + without a __cause__, and that __suppress_context__ is never set. + + Python 3 will report nested as exceptions as "during the handling of + error X, error Y occurred". That's not what we want to do. We want + these exceptions in a cause chain. + + """ + + if ( + exception.__context__ is not exception.__cause__ + and not exception.__suppress_context__ + ): + assert False, ( + "Exception %r was correctly raised but did not set a cause, " + "within context %r as its cause." + % (exception, exception.__context__) + ) + + +def _assert_proper_cause_cls(exception, cause_cls): + """assert that any exception we're catching does not have a __context__ + without a __cause__, and that __suppress_context__ is never set. + + Python 3 will report nested as exceptions as "during the handling of + error X, error Y occurred". That's not what we want to do. We want + these exceptions in a cause chain. + + """ + assert isinstance(exception.__cause__, cause_cls), ( + "Exception %r was correctly raised but has cause %r, which does not " + "have the expected cause type %r." 
+ % (exception, exception.__cause__, cause_cls) + ) + + +def assert_raises(except_cls, callable_, *args, **kw): + return _assert_raises(except_cls, callable_, args, kw) + + +def assert_raises_with_proper_context(except_cls, callable_, *args, **kw): + return _assert_raises(except_cls, callable_, args, kw, check_context=True) + + +def assert_raises_with_given_cause( + except_cls, cause_cls, callable_, *args, **kw +): + return _assert_raises(except_cls, callable_, args, kw, cause_cls=cause_cls) + + +def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): + return _assert_raises(except_cls, callable_, args, kwargs, msg=msg) + + +def assert_raises_message_with_proper_context( + except_cls, msg, callable_, *args, **kwargs +): + return _assert_raises( + except_cls, callable_, args, kwargs, msg=msg, check_context=True + ) + + +def assert_raises_message_with_given_cause( + except_cls, msg, cause_cls, callable_, *args, **kwargs +): + return _assert_raises( + except_cls, callable_, args, kwargs, msg=msg, cause_cls=cause_cls + ) + + +def _assert_raises( + except_cls, + callable_, + args, + kwargs, + msg=None, + check_context=False, + cause_cls=None, +): + + with _expect_raises(except_cls, msg, check_context, cause_cls) as ec: + callable_(*args, **kwargs) + return ec.error + + +class _ErrorContainer: + error = None + + +@contextlib.contextmanager +def _expect_raises(except_cls, msg=None, check_context=False, cause_cls=None): + ec = _ErrorContainer() + if check_context: + are_we_already_in_a_traceback = sys.exc_info()[0] + try: + yield ec + success = False + except except_cls as err: + ec.error = err + success = True + if msg is not None: + # I'm often pdbing here, and "err" above isn't + # in scope, so assign the string explicitly + error_as_string = str(err) + assert re.search(msg, error_as_string, re.UNICODE), "%r !~ %s" % ( + msg, + error_as_string, + ) + if cause_cls is not None: + _assert_proper_cause_cls(err, cause_cls) + if check_context and not are_we_already_in_a_traceback: + _assert_proper_exception_context(err) + print(str(err).encode("utf-8")) + + # it's generally a good idea to not carry traceback objects outside + # of the except: block, but in this case especially we seem to have + # hit some bug in either python 3.10.0b2 or greenlet or both which + # this seems to fix: + # https://github.com/python-greenlet/greenlet/issues/242 + del ec + + # assert outside the block so it works for AssertionError too ! 
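# (if this assert sat inside the try block and except_cls were
# AssertionError, its own failure would be caught by the except clause
# and recorded as the expected exception)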
+ assert success, "Callable did not raise an exception" + + +def expect_raises(except_cls, check_context=False): + return _expect_raises(except_cls, check_context=check_context) + + +def expect_raises_message(except_cls, msg, check_context=False): + return _expect_raises(except_cls, msg=msg, check_context=check_context) + + +def expect_raises_with_proper_context(except_cls, check_context=True): + return _expect_raises(except_cls, check_context=check_context) + + +def expect_raises_message_with_proper_context( + except_cls, msg, check_context=True +): + return _expect_raises(except_cls, msg=msg, check_context=check_context) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/config.py new file mode 100644 index 0000000..b77d0c0 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/config.py @@ -0,0 +1,17 @@ +from dataclasses import dataclass +from pathlib import Path + +from ._config import ReadsCfg +from .helpers import make_path + + +@dataclass +class Config(ReadsCfg): + module_base: Path + template_base: Path + + section_header = "mako_testing" + converters = {Path: make_path} + + +config = Config.from_cfg_file("./setup.cfg") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/exclusions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/exclusions.py new file mode 100644 index 0000000..37b2d14 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/exclusions.py @@ -0,0 +1,80 @@ +import pytest + +from mako.ext.beaker_cache import has_beaker +from mako.util import update_wrapper + + +try: + import babel.messages.extract as babel +except ImportError: + babel = None + + +try: + import lingua +except ImportError: + lingua = None + + +try: + import dogpile.cache # noqa +except ImportError: + has_dogpile_cache = False +else: + has_dogpile_cache = True + + +requires_beaker = pytest.mark.skipif( + not has_beaker, reason="Beaker is required for these tests." 
+) + + +requires_babel = pytest.mark.skipif( + babel is None, reason="babel not installed: skipping babelplugin test" +) + + +requires_lingua = pytest.mark.skipif( + lingua is None, reason="lingua not installed: skipping linguaplugin test" +) + + +requires_dogpile_cache = pytest.mark.skipif( + not has_dogpile_cache, + reason="dogpile.cache is required to run these tests", +) + + +def _pygments_version(): + try: + import pygments + + version = pygments.__version__ + except: + version = "0" + return version + + +requires_pygments_14 = pytest.mark.skipif( + _pygments_version() < "1.4", reason="Requires pygments 1.4 or greater" +) + + +# def requires_pygments_14(fn): + +# return skip_if( +# lambda: version < "1.4", "Requires pygments 1.4 or greater" +# )(fn) + + +def requires_no_pygments_exceptions(fn): + def go(*arg, **kw): + from mako import exceptions + + exceptions._install_fallback() + try: + return fn(*arg, **kw) + finally: + exceptions._install_highlighting() + + return update_wrapper(go, fn) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/fixtures.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/fixtures.py new file mode 100644 index 0000000..01e9961 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/fixtures.py @@ -0,0 +1,119 @@ +import os + +from mako.cache import CacheImpl +from mako.cache import register_plugin +from mako.template import Template +from .assertions import eq_ +from .config import config + + +class TemplateTest: + def _file_template(self, filename, **kw): + filepath = self._file_path(filename) + return Template( + uri=filename, + filename=filepath, + module_directory=config.module_base, + **kw, + ) + + def _file_path(self, filename): + name, ext = os.path.splitext(filename) + py3k_path = os.path.join(config.template_base, name + "_py3k" + ext) + if os.path.exists(py3k_path): + return py3k_path + + return os.path.join(config.template_base, filename) + + def _do_file_test( + self, + filename, + expected, + filters=None, + unicode_=True, + template_args=None, + **kw, + ): + t1 = self._file_template(filename, **kw) + self._do_test( + t1, + expected, + filters=filters, + unicode_=unicode_, + template_args=template_args, + ) + + def _do_memory_test( + self, + source, + expected, + filters=None, + unicode_=True, + template_args=None, + **kw, + ): + t1 = Template(text=source, **kw) + self._do_test( + t1, + expected, + filters=filters, + unicode_=unicode_, + template_args=template_args, + ) + + def _do_test( + self, + template, + expected, + filters=None, + template_args=None, + unicode_=True, + ): + if template_args is None: + template_args = {} + if unicode_: + output = template.render_unicode(**template_args) + else: + output = template.render(**template_args) + + if filters: + output = filters(output) + eq_(output, expected) + + def indicates_unbound_local_error(self, rendered_output, unbound_var): + var = f"'{unbound_var}'" + error_msgs = ( + # < 3.11 + f"local variable {var} referenced before assignment", + # >= 3.11 + f"cannot access local variable {var} where it is not associated", + ) + return any((msg in rendered_output) for msg in error_msgs) + + +class PlainCacheImpl(CacheImpl): + """Simple memory cache impl so that tests which + use caching can run without beaker.""" + + def __init__(self, cache): + self.cache = cache + self.data = {} + + def get_or_create(self, key, creation_function, **kw): + if key in self.data: + return self.data[key] + 
else: + self.data[key] = data = creation_function(**kw) + return data + + def put(self, key, value, **kw): + self.data[key] = value + + def get(self, key, **kw): + return self.data[key] + + def invalidate(self, key, **kw): + del self.data[key] + + +register_plugin("plain", __name__, "PlainCacheImpl") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/helpers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/helpers.py new file mode 100644 index 0000000..77cca36 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/testing/helpers.py @@ -0,0 +1,67 @@ +import contextlib +import pathlib +from pathlib import Path +import re +import time +from typing import Union +from unittest import mock + + +def flatten_result(result): + return re.sub(r"[\s\r\n]+", " ", result).strip() + + +def result_lines(result): + return [ + x.strip() + for x in re.split(r"\r?\n", re.sub(r" +", " ", result)) + if x.strip() != "" + ] + + +def make_path( + filespec: Union[Path, str], + make_absolute: bool = True, + check_exists: bool = False, +) -> Path: + path = Path(filespec) + if make_absolute: + path = path.resolve(strict=check_exists) + if check_exists and (not path.exists()): + raise FileNotFoundError(f"No file or directory at {filespec}") + return path + + +def _unlink_path(path, missing_ok=False): + # Replicate 3.8+ functionality in 3.7 + cm = contextlib.nullcontext() + if missing_ok: + cm = contextlib.suppress(FileNotFoundError) + + with cm: + path.unlink() + + +def replace_file_with_dir(pathspec): + path = pathlib.Path(pathspec) + _unlink_path(path, missing_ok=True) + path.mkdir(exist_ok=True) + return path + + +def file_with_template_code(filespec): + with open(filespec, "w") as f: + f.write( + """ +i am an artificial template just for you +""" + ) + return filespec + + +@contextlib.contextmanager +def rewind_compile_time(hours=1): + rewound = time.time() - (hours * 3_600) + with mock.patch("mako.codegen.time") as codegen_time: + codegen_time.time.return_value = rewound + yield diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/util.py old mode 100755 new mode 100644 index 07f7531..5fb683b --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/util.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/mako/util.py @@ -1,9 +1,9 @@ # mako/util.py -# Copyright 2006-2019 the Mako authors and contributors +# Copyright 2006-2022 the Mako authors and contributors # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php - +from ast import parse import codecs import collections import operator @@ -11,7 +11,7 @@ import re import timeit -from mako import compat +from .compat import importlib_metadata_get def update_wrapper(decorated, fn): @@ -20,7 +20,7 @@ def update_wrapper(decorated, fn): return decorated -class PluginLoader(object): +class PluginLoader: def __init__(self, group): self.group = group self.impls = {} @@ -28,18 +28,17 @@ def __init__(self, group): def load(self, name): if name in self.impls: return self.impls[name]() - else: - import pkg_resources - for impl in pkg_resources.iter_entry_points(self.group, name): + for impl in importlib_metadata_get(self.group): + if impl.name == name: self.impls[name] = impl.load return impl.load() - else: - from mako import exceptions - raise 
exceptions.RuntimeException( - "Can't load plugin %s %s" % (self.group, name) - ) + from mako import exceptions + + raise exceptions.RuntimeException( + "Can't load plugin %s %s" % (self.group, name) + ) def register(self, name, modulepath, objname): def load(): @@ -59,7 +58,7 @@ def verify_directory(dir_): while not os.path.exists(dir_): try: tries += 1 - os.makedirs(dir_, compat.octal("0775")) + os.makedirs(dir_, 0o755) except: if tries > 5: raise @@ -74,7 +73,7 @@ def to_list(x, default=None): return x -class memoized_property(object): +class memoized_property: """A read-only @property that is only evaluated once.""" @@ -90,7 +89,7 @@ def __get__(self, obj, cls): return result -class memoized_instancemethod(object): +class memoized_instancemethod: """Decorate a method memoize its return value. @@ -138,19 +137,15 @@ def union(self, other): return x -class FastEncodingBuffer(object): +class FastEncodingBuffer: """a very rudimentary buffer that is faster than StringIO, - but doesn't crash on unicode data like cStringIO.""" + and supports unicode data.""" - def __init__(self, encoding=None, errors="strict", as_unicode=False): + def __init__(self, encoding=None, errors="strict"): self.data = collections.deque() self.encoding = encoding - if as_unicode: - self.delim = compat.u("") - else: - self.delim = "" - self.as_unicode = as_unicode + self.delim = "" self.errors = errors self.write = self.data.append @@ -177,7 +172,7 @@ class LRUCache(dict): is inexact. """ - class _Item(object): + class _Item: def __init__(self, key, value): self.key = key self.value = value @@ -201,9 +196,8 @@ def values(self): def setdefault(self, key, value): if key in self: return self[key] - else: - self[key] = value - return value + self[key] = value + return value def __setitem__(self, key, value): item = dict.get(self, key) @@ -257,9 +251,7 @@ def parse_encoding(fp): m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode("ascii", "ignore")) if not m: try: - import parser - - parser.suite(line1.decode("ascii", "ignore")) + parse(line1.decode("ascii", "ignore")) except (ImportError, SyntaxError): # Either it's a real syntax error, in which case the source # is not valid python source, or line2 is a continuation of @@ -295,7 +287,7 @@ def sorted_dict_repr(d): """ keys = list(d.keys()) keys.sort() - return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}" + return "{" + ", ".join("%r: %r" % (k, d[k]) for k in keys) + "}" def restore__ast(_ast): @@ -308,7 +300,7 @@ def restore__ast(_ast): m = compile( """\ def foo(): pass -class Bar(object): pass +class Bar: pass if False: pass baz = 'mako' 1 + 2 - 3 * 4 / 5 @@ -380,12 +372,8 @@ class Bar(object): pass def read_file(path, mode="rb"): - fp = open(path, mode) - try: - data = fp.read() - return data - finally: - fp.close() + with open(path, mode) as fp: + return fp.read() def read_python_file(path): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/_native.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/_native.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/modinput_wrapper/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/modinput_wrapper/__init__.py old mode 100755 new mode 100644 diff 
--git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/RECORD index 18a8852..0f27efb 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/RECORD @@ -3,7 +3,7 @@ munch-2.3.2.dist-info/LICENSE.txt,sha256=V8qVySBZyDgGJRkkYpeb0ymUquP835Av9useRn7 munch-2.3.2.dist-info/METADATA,sha256=8f76jlBWV-he4FEQir_-TvfmKxfPNqm5bwIvbE6gims,865 munch-2.3.2.dist-info/RECORD,, munch-2.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -munch-2.3.2.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +munch-2.3.2.dist-info/WHEEL,sha256=a-zpFRIJzOq5QfuhBzbhiA1eHTzNCJn8OdRvhdNX0Rk,110 munch-2.3.2.dist-info/top_level.txt,sha256=PRHN8MYaV54issXsc-3Sde-NdKBLL7BXsafd7Haw8IE,6 munch/__init__.py,sha256=UQvBwwtPbqAzNzIADInzc1fWpX38nFztUDboLQigu0o,16556 munch/python3_compat.py,sha256=fguSsh5lZOxfWI6r4hvima0N8dYn167fXqpRkqu5OEw,71 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/WHEEL index 0b18a28..f771c29 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch-2.3.2.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) +Generator: bdist_wheel (0.40.0) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch/python3_compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/munch/python3_compat.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/__init__.py deleted file mode 100755 index 1471303..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/__init__.py +++ /dev/null @@ -1,90 +0,0 @@ -# coding=utf-8 -""" -past: compatibility with Python 2 from Python 3 -=============================================== - -``past`` is a package to aid with Python 2/3 compatibility. Whereas ``future`` -contains backports of Python 3 constructs to Python 2, ``past`` provides -implementations of some Python 2 constructs in Python 3 and tools to import and -run Python 2 code in Python 3. It is intended to be used sparingly, as a way of -running old Python 2 code from Python 3 until the code is ported properly. - -Potential uses for libraries: - -- as a step in porting a Python 2 codebase to Python 3 (e.g. with the ``futurize`` script) -- to provide Python 3 support for previously Python 2-only libraries with the - same APIs as on Python 2 -- particularly with regard to 8-bit strings (the - ``past.builtins.str`` type). -- to aid in providing minimal-effort Python 3 support for applications using - libraries that do not yet wish to upgrade their code properly to Python 3, or - wish to upgrade it gradually to Python 3 style. 
- - -Here are some code examples that run identically on Python 3 and 2:: - - >>> from past.builtins import str as oldstr - - >>> philosopher = oldstr(u'\u5b54\u5b50'.encode('utf-8')) - >>> # This now behaves like a Py2 byte-string on both Py2 and Py3. - >>> # For example, indexing returns a Python 2-like string object, not - >>> # an integer: - >>> philosopher[0] - '\xe5' - >>> type(philosopher[0]) - <past.types.oldstr> - - >>> # List-producing versions of range, reduce, map, filter - >>> from past.builtins import range, reduce - >>> range(10) - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] - >>> reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) - 15 - - >>> # Other functions removed in Python 3 are resurrected ... - >>> from past.builtins import execfile - >>> execfile('myfile.py') - - >>> from past.builtins import raw_input - >>> name = raw_input('What is your name? ') - What is your name? [cursor] - - >>> from past.builtins import reload - >>> reload(mymodule) # equivalent to imp.reload(mymodule) in Python 3 - - >>> from past.builtins import xrange - >>> for i in xrange(10): - ... pass - - -It also provides import hooks so you can import and use Python 2 modules like -this:: - - $ python3 - - >>> from past.translation import autotranslate - >>> authotranslate('mypy2module') - >>> import mypy2module - -until the authors of the Python 2 modules have upgraded their code. Then, for -example:: - - >>> mypy2module.func_taking_py2_string(oldstr(b'abcd')) - - -Credits --------- - -:Author: Ed Schofield, Jordan M. Adler, et al -:Sponsor: Python Charmers Pty Ltd, Australia: http://pythoncharmers.com - - -Licensing ----------- -Copyright 2013-2019 Python Charmers Pty Ltd, Australia. -The software is distributed under an MIT licence. See LICENSE.txt. -""" - -from future import __version__, __copyright__, __license__ - -__title__ = 'past' -__author__ = 'Ed Schofield' diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/__init__.py deleted file mode 100755 index 1b19e37..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -A resurrection of some old functions from Python 2 for use in Python 3. These -should be used sparingly, to help with porting efforts, since code using them -is no longer standard Python 3 code. - -This module provides the following: - -1. Implementations of these builtin functions which have no equivalent on Py3: - -- apply -- chr -- cmp -- execfile - -2. Aliases: - -- intern <- sys.intern -- raw_input <- input -- reduce <- functools.reduce -- reload <- imp.reload -- unichr <- chr -- unicode <- str -- xrange <- range - -3. List-producing versions of the corresponding Python 3 iterator-producing functions: - -- filter -- map -- range -- zip - -4. Forward-ported Py2 types: - -- basestring -- dict -- str -- long -- unicode - -""" - -from future.utils import PY3 -from past.builtins.noniterators import (filter, map, range, reduce, zip) -# from past.builtins.misc import (ascii, hex, input, oct, open) -if PY3: - from past.types import (basestring, - olddict as dict, - oldstr as str, - long, - unicode) -else: - from __builtin__ import (basestring, dict, str, long, unicode) - -from past.builtins.misc import (apply, chr, cmp, execfile, intern, oct, - raw_input, reload, unichr, unicode, xrange) -from past import utils - - -if utils.PY3: - # We only import names that shadow the builtins on Py3.
No other namespace - # pollution on Py3. - - # Only shadow builtins on Py3; no new names - __all__ = ['filter', 'map', 'range', 'reduce', 'zip', - 'basestring', 'dict', 'str', 'long', 'unicode', - 'apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input', - 'reload', 'unichr', 'xrange' - ] - -else: - # No namespace pollution on Py2 - __all__ = [] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/misc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/misc.py deleted file mode 100755 index ba50aa9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/misc.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import unicode_literals - -import inspect - -from future.utils import PY2, PY3, exec_ - -if PY2: - from collections import Mapping -else: - from collections.abc import Mapping - -if PY3: - import builtins - from collections.abc import Mapping - - def apply(f, *args, **kw): - return f(*args, **kw) - - from past.builtins import str as oldstr - - def chr(i): - """ - Return a byte-string of one character with ordinal i; 0 <= i <= 256 - """ - return oldstr(bytes((i,))) - - def cmp(x, y): - """ - cmp(x, y) -> integer - - Return negative if x<y, zero if x==y, positive if x>y. - """ - return (x > y) - (x < y) - - from sys import intern - - def oct(number): - """oct(number) -> string - - Return the octal representation of an integer - """ - return '0' + builtins.oct(number)[2:] - - raw_input = input - from imp import reload - unicode = str - unichr = chr - xrange = range -else: - import __builtin__ - from collections import Mapping - apply = __builtin__.apply - chr = __builtin__.chr - cmp = __builtin__.cmp - execfile = __builtin__.execfile - intern = __builtin__.intern - oct = __builtin__.oct - raw_input = __builtin__.raw_input - reload = __builtin__.reload - unicode = __builtin__.unicode - unichr = __builtin__.unichr - xrange = __builtin__.xrange - - -if PY3: - def execfile(filename, myglobals=None, mylocals=None): - """ - Read and execute a Python script from a file in the given namespaces. - The globals and locals are dictionaries, defaulting to the current - globals and locals. If only globals is given, locals defaults to it. - """ - if myglobals is None: - # There seems to be no alternative to frame hacking here. - caller_frame = inspect.stack()[1] - myglobals = caller_frame[0].f_globals - mylocals = caller_frame[0].f_locals - elif mylocals is None: - # Only if myglobals is given do we set mylocals to it.
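# (this mirrors Python 2's execfile, where passing only a globals dict
# makes it double as locals, the same rule exec follows)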
- mylocals = myglobals - if not isinstance(myglobals, Mapping): - raise TypeError('globals must be a mapping') - if not isinstance(mylocals, Mapping): - raise TypeError('locals must be a mapping') - with open(filename, "rb") as fin: - source = fin.read() - code = compile(source, filename, "exec") - exec_(code, myglobals, mylocals) - - -if PY3: - __all__ = ['apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input', - 'reload', 'unichr', 'unicode', 'xrange'] -else: - __all__ = [] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/noniterators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/noniterators.py deleted file mode 100755 index 183ffff..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/builtins/noniterators.py +++ /dev/null @@ -1,272 +0,0 @@ -""" -This module is designed to be used as follows:: - - from past.builtins.noniterators import filter, map, range, reduce, zip - -And then, for example:: - - assert isinstance(range(5), list) - -The list-producing functions this brings in are:: - -- ``filter`` -- ``map`` -- ``range`` -- ``reduce`` -- ``zip`` - -""" - -from __future__ import division, absolute_import, print_function - -from itertools import chain, starmap -import itertools # since zip_longest doesn't exist on Py2 -from past.types import basestring -from past.utils import PY3 - - -def flatmap(f, items): - return chain.from_iterable(map(f, items)) - - -if PY3: - import builtins - - # list-producing versions of the major Python iterating functions - def oldfilter(*args): - """ - filter(function or None, sequence) -> list, tuple, or string - - Return those items of sequence for which function(item) is true. - If function is None, return the items that are true. If sequence - is a tuple or string, return the same type, else return a list. - """ - mytype = type(args[1]) - if isinstance(args[1], basestring): - return mytype().join(builtins.filter(*args)) - elif isinstance(args[1], (tuple, list)): - return mytype(builtins.filter(*args)) - else: - # Fall back to list. Is this the right thing to do? - return list(builtins.filter(*args)) - - # This is surprisingly difficult to get right. For example, the - # solutions here fail with the test cases in the docstring below: - # http://stackoverflow.com/questions/8072755/ - def oldmap(func, *iterables): - """ - map(function, sequence[, sequence, ...]) -> list - - Return a list of the results of applying the function to the - items of the argument sequence(s). If more than one sequence is - given, the function is called with an argument list consisting of - the corresponding item of each sequence, substituting None for - missing values when not all sequences have the same length. If - the function is None, return a list of the items of the sequence - (or a list of tuples if more than one sequence). - - Test cases: - >>> oldmap(None, 'hello world') - ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd'] - - >>> oldmap(None, range(4)) - [0, 1, 2, 3] - - More test cases are in test_past.test_builtins. 
- """ - zipped = itertools.zip_longest(*iterables) - l = list(zipped) - if len(l) == 0: - return [] - if func is None: - result = l - else: - result = list(starmap(func, l)) - - # Inspect to see whether it's a simple sequence of tuples - try: - if max([len(item) for item in result]) == 1: - return list(chain.from_iterable(result)) - # return list(flatmap(func, result)) - except TypeError as e: - # Simple objects like ints have no len() - pass - return result - - ############################ - ### For reference, the source code for Py2.7 map function: - # static PyObject * - # builtin_map(PyObject *self, PyObject *args) - # { - # typedef struct { - # PyObject *it; /* the iterator object */ - # int saw_StopIteration; /* bool: did the iterator end? */ - # } sequence; - # - # PyObject *func, *result; - # sequence *seqs = NULL, *sqp; - # Py_ssize_t n, len; - # register int i, j; - # - # n = PyTuple_Size(args); - # if (n < 2) { - # PyErr_SetString(PyExc_TypeError, - # "map() requires at least two args"); - # return NULL; - # } - # - # func = PyTuple_GetItem(args, 0); - # n--; - # - # if (func == Py_None) { - # if (PyErr_WarnPy3k("map(None, ...) not supported in 3.x; " - # "use list(...)", 1) < 0) - # return NULL; - # if (n == 1) { - # /* map(None, S) is the same as list(S). */ - # return PySequence_List(PyTuple_GetItem(args, 1)); - # } - # } - # - # /* Get space for sequence descriptors. Must NULL out the iterator - # * pointers so that jumping to Fail_2 later doesn't see trash. - # */ - # if ((seqs = PyMem_NEW(sequence, n)) == NULL) { - # PyErr_NoMemory(); - # return NULL; - # } - # for (i = 0; i < n; ++i) { - # seqs[i].it = (PyObject*)NULL; - # seqs[i].saw_StopIteration = 0; - # } - # - # /* Do a first pass to obtain iterators for the arguments, and set len - # * to the largest of their lengths. - # */ - # len = 0; - # for (i = 0, sqp = seqs; i < n; ++i, ++sqp) { - # PyObject *curseq; - # Py_ssize_t curlen; - # - # /* Get iterator. */ - # curseq = PyTuple_GetItem(args, i+1); - # sqp->it = PyObject_GetIter(curseq); - # if (sqp->it == NULL) { - # static char errmsg[] = - # "argument %d to map() must support iteration"; - # char errbuf[sizeof(errmsg) + 25]; - # PyOS_snprintf(errbuf, sizeof(errbuf), errmsg, i+2); - # PyErr_SetString(PyExc_TypeError, errbuf); - # goto Fail_2; - # } - # - # /* Update len. */ - # curlen = _PyObject_LengthHint(curseq, 8); - # if (curlen > len) - # len = curlen; - # } - # - # /* Get space for the result list. */ - # if ((result = (PyObject *) PyList_New(len)) == NULL) - # goto Fail_2; - # - # /* Iterate over the sequences until all have stopped. 
*/ - # for (i = 0; ; ++i) { - # PyObject *alist, *item=NULL, *value; - # int numactive = 0; - # - # if (func == Py_None && n == 1) - # alist = NULL; - # else if ((alist = PyTuple_New(n)) == NULL) - # goto Fail_1; - # - # for (j = 0, sqp = seqs; j < n; ++j, ++sqp) { - # if (sqp->saw_StopIteration) { - # Py_INCREF(Py_None); - # item = Py_None; - # } - # else { - # item = PyIter_Next(sqp->it); - # if (item) - # ++numactive; - # else { - # if (PyErr_Occurred()) { - # Py_XDECREF(alist); - # goto Fail_1; - # } - # Py_INCREF(Py_None); - # item = Py_None; - # sqp->saw_StopIteration = 1; - # } - # } - # if (alist) - # PyTuple_SET_ITEM(alist, j, item); - # else - # break; - # } - # - # if (!alist) - # alist = item; - # - # if (numactive == 0) { - # Py_DECREF(alist); - # break; - # } - # - # if (func == Py_None) - # value = alist; - # else { - # value = PyEval_CallObject(func, alist); - # Py_DECREF(alist); - # if (value == NULL) - # goto Fail_1; - # } - # if (i >= len) { - # int status = PyList_Append(result, value); - # Py_DECREF(value); - # if (status < 0) - # goto Fail_1; - # } - # else if (PyList_SetItem(result, i, value) < 0) - # goto Fail_1; - # } - # - # if (i < len && PyList_SetSlice(result, i, len, NULL) < 0) - # goto Fail_1; - # - # goto Succeed; - # - # Fail_1: - # Py_DECREF(result); - # Fail_2: - # result = NULL; - # Succeed: - # assert(seqs); - # for (i = 0; i < n; ++i) - # Py_XDECREF(seqs[i].it); - # PyMem_DEL(seqs); - # return result; - # } - - def oldrange(*args, **kwargs): - return list(builtins.range(*args, **kwargs)) - - def oldzip(*args, **kwargs): - return list(builtins.zip(*args, **kwargs)) - - filter = oldfilter - map = oldmap - range = oldrange - from functools import reduce - zip = oldzip - __all__ = ['filter', 'map', 'range', 'reduce', 'zip'] - -else: - import __builtin__ - # Python 2-builtin ranges produce lists - filter = __builtin__.filter - map = __builtin__.map - range = __builtin__.range - reduce = __builtin__.reduce - zip = __builtin__.zip - __all__ = [] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/translation/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/translation/__init__.py deleted file mode 100755 index 7c67886..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/translation/__init__.py +++ /dev/null @@ -1,485 +0,0 @@ -# -*- coding: utf-8 -*- -""" -past.translation -================== - -The ``past.translation`` package provides an import hook for Python 3 which -transparently runs ``futurize`` fixers over Python 2 code on import to convert -print statements into functions, etc. - -It is intended to assist users in migrating to Python 3.x even if some -dependencies still only support Python 2.x. - -Usage ------ - -Once your Py2 package is installed in the usual module search path, the import -hook is invoked as follows: - - >>> from past.translation import autotranslate - >>> autotranslate('mypackagename') - -Or: - - >>> autotranslate(['mypackage1', 'mypackage2']) - -You can unregister the hook using:: - - >>> from past.translation import remove_hooks - >>> remove_hooks() - -Author: Ed Schofield. -Inspired by and based on ``uprefix`` by Vinay M. Sajip. 
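# --- usage sketch, not part of the patch: the past.translation import hook,
# used exactly as its docstring above shows; 'mypackagename' is a placeholder
# for an installed Py2-only package.
from past.translation import autotranslate, remove_hooks

autotranslate('mypackagename')   # register the translating import hook
import mypackagename             # futurized transparently on import
remove_hooks()                   # unregister the hook when done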
-""" - -import imp -import logging -import marshal -import os -import sys -import copy -from lib2to3.pgen2.parse import ParseError -from lib2to3.refactor import RefactoringTool - -from libfuturize import fixes - - -logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) - -myfixes = (list(fixes.libfuturize_fix_names_stage1) + - list(fixes.lib2to3_fix_names_stage1) + - list(fixes.libfuturize_fix_names_stage2) + - list(fixes.lib2to3_fix_names_stage2)) - - -# We detect whether the code is Py2 or Py3 by applying certain lib2to3 fixers -# to it. If the diff is empty, it's Python 3 code. - -py2_detect_fixers = [ -# From stage 1: - 'lib2to3.fixes.fix_apply', - # 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc. and move to stage2 - 'lib2to3.fixes.fix_except', - 'lib2to3.fixes.fix_execfile', - 'lib2to3.fixes.fix_exitfunc', - 'lib2to3.fixes.fix_funcattrs', - 'lib2to3.fixes.fix_filter', - 'lib2to3.fixes.fix_has_key', - 'lib2to3.fixes.fix_idioms', - 'lib2to3.fixes.fix_import', # makes any implicit relative imports explicit. (Use with ``from __future__ import absolute_import) - 'lib2to3.fixes.fix_intern', - 'lib2to3.fixes.fix_isinstance', - 'lib2to3.fixes.fix_methodattrs', - 'lib2to3.fixes.fix_ne', - 'lib2to3.fixes.fix_numliterals', # turns 1L into 1, 0755 into 0o755 - 'lib2to3.fixes.fix_paren', - 'lib2to3.fixes.fix_print', - 'lib2to3.fixes.fix_raise', # uses incompatible with_traceback() method on exceptions - 'lib2to3.fixes.fix_renames', - 'lib2to3.fixes.fix_reduce', - # 'lib2to3.fixes.fix_set_literal', # this is unnecessary and breaks Py2.6 support - 'lib2to3.fixes.fix_repr', - 'lib2to3.fixes.fix_standarderror', - 'lib2to3.fixes.fix_sys_exc', - 'lib2to3.fixes.fix_throw', - 'lib2to3.fixes.fix_tuple_params', - 'lib2to3.fixes.fix_types', - 'lib2to3.fixes.fix_ws_comma', - 'lib2to3.fixes.fix_xreadlines', - -# From stage 2: - 'lib2to3.fixes.fix_basestring', - # 'lib2to3.fixes.fix_buffer', # perhaps not safe. Test this. - # 'lib2to3.fixes.fix_callable', # not needed in Py3.2+ - # 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc. - 'lib2to3.fixes.fix_exec', - # 'lib2to3.fixes.fix_future', # we don't want to remove __future__ imports - 'lib2to3.fixes.fix_getcwdu', - # 'lib2to3.fixes.fix_imports', # called by libfuturize.fixes.fix_future_standard_library - # 'lib2to3.fixes.fix_imports2', # we don't handle this yet (dbm) - # 'lib2to3.fixes.fix_input', - # 'lib2to3.fixes.fix_itertools', - # 'lib2to3.fixes.fix_itertools_imports', - 'lib2to3.fixes.fix_long', - # 'lib2to3.fixes.fix_map', - # 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead - 'lib2to3.fixes.fix_next', - 'lib2to3.fixes.fix_nonzero', # TODO: add a decorator for mapping __bool__ to __nonzero__ - # 'lib2to3.fixes.fix_operator', # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3 - 'lib2to3.fixes.fix_raw_input', - # 'lib2to3.fixes.fix_unicode', # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings - # 'lib2to3.fixes.fix_urllib', - 'lib2to3.fixes.fix_xrange', - # 'lib2to3.fixes.fix_zip', -] - - -class RTs: - """ - A namespace for the refactoring tools. This avoids creating these at - the module level, which slows down the module import. (See issue #117). - - There are two possible grammars: with or without the print statement. - Hence we have two possible refactoring tool implementations. 
- """ - _rt = None - _rtp = None - _rt_py2_detect = None - _rtp_py2_detect = None - - @staticmethod - def setup(): - """ - Call this before using the refactoring tools to create them on demand - if needed. - """ - if None in [RTs._rt, RTs._rtp]: - RTs._rt = RefactoringTool(myfixes) - RTs._rtp = RefactoringTool(myfixes, {'print_function': True}) - - - @staticmethod - def setup_detect_python2(): - """ - Call this before using the refactoring tools to create them on demand - if needed. - """ - if None in [RTs._rt_py2_detect, RTs._rtp_py2_detect]: - RTs._rt_py2_detect = RefactoringTool(py2_detect_fixers) - RTs._rtp_py2_detect = RefactoringTool(py2_detect_fixers, - {'print_function': True}) - - -# We need to find a prefix for the standard library, as we don't want to -# process any files there (they will already be Python 3). -# -# The following method is used by Sanjay Vinip in uprefix. This fails for -# ``conda`` environments: -# # In a non-pythonv virtualenv, sys.real_prefix points to the installed Python. -# # In a pythonv venv, sys.base_prefix points to the installed Python. -# # Outside a virtual environment, sys.prefix points to the installed Python. - -# if hasattr(sys, 'real_prefix'): -# _syslibprefix = sys.real_prefix -# else: -# _syslibprefix = getattr(sys, 'base_prefix', sys.prefix) - -# Instead, we use the portion of the path common to both the stdlib modules -# ``math`` and ``urllib``. - -def splitall(path): - """ - Split a path into all components. From Python Cookbook. - """ - allparts = [] - while True: - parts = os.path.split(path) - if parts[0] == path: # sentinel for absolute paths - allparts.insert(0, parts[0]) - break - elif parts[1] == path: # sentinel for relative paths - allparts.insert(0, parts[1]) - break - else: - path = parts[0] - allparts.insert(0, parts[1]) - return allparts - - -def common_substring(s1, s2): - """ - Returns the longest common substring to the two strings, starting from the - left. - """ - chunks = [] - path1 = splitall(s1) - path2 = splitall(s2) - for (dir1, dir2) in zip(path1, path2): - if dir1 != dir2: - break - chunks.append(dir1) - return os.path.join(*chunks) - -# _stdlibprefix = common_substring(math.__file__, urllib.__file__) - - -def detect_python2(source, pathname): - """ - Returns a bool indicating whether we think the code is Py2 - """ - RTs.setup_detect_python2() - try: - tree = RTs._rt_py2_detect.refactor_string(source, pathname) - except ParseError as e: - if e.msg != 'bad input' or e.value != '=': - raise - tree = RTs._rtp.refactor_string(source, pathname) - - if source != str(tree)[:-1]: # remove added newline - # The above fixers made changes, so we conclude it's Python 2 code - logger.debug('Detected Python 2 code: {0}'.format(pathname)) - return True - else: - logger.debug('Detected Python 3 code: {0}'.format(pathname)) - return False - - -class Py2Fixer(object): - """ - An import hook class that uses lib2to3 for source-to-source translation of - Py2 code to Py3. - """ - - # See the comments on :class:future.standard_library.RenameImport. 
- # We add this attribute here so remove_hooks() and install_hooks() can - # unambiguously detect whether the import hook is installed: - PY2FIXER = True - - def __init__(self): - self.found = None - self.base_exclude_paths = ['future', 'past'] - self.exclude_paths = copy.copy(self.base_exclude_paths) - self.include_paths = [] - - def include(self, paths): - """ - Pass in a sequence of module names such as 'plotrique.plotting' that, - if present at the leftmost side of the full package name, would - specify the module to be transformed from Py2 to Py3. - """ - self.include_paths += paths - - def exclude(self, paths): - """ - Pass in a sequence of strings such as 'mymodule' that, if - present at the leftmost side of the full package name, would cause - the module not to undergo any source transformation. - """ - self.exclude_paths += paths - - def find_module(self, fullname, path=None): - logger.debug('Running find_module: {0}...'.format(fullname)) - if '.' in fullname: - parent, child = fullname.rsplit('.', 1) - if path is None: - loader = self.find_module(parent, path) - mod = loader.load_module(parent) - path = mod.__path__ - fullname = child - - # Perhaps we should try using the new importlib functionality in Python - # 3.3: something like this? - # thing = importlib.machinery.PathFinder.find_module(fullname, path) - try: - self.found = imp.find_module(fullname, path) - except Exception as e: - logger.debug('Py2Fixer could not find {0}') - logger.debug('Exception was: {0})'.format(fullname, e)) - return None - self.kind = self.found[-1][-1] - if self.kind == imp.PKG_DIRECTORY: - self.pathname = os.path.join(self.found[1], '__init__.py') - elif self.kind == imp.PY_SOURCE: - self.pathname = self.found[1] - return self - - def transform(self, source): - # This implementation uses lib2to3, - # you can override and use something else - # if that's better for you - - # lib2to3 likes a newline at the end - RTs.setup() - source += '\n' - try: - tree = RTs._rt.refactor_string(source, self.pathname) - except ParseError as e: - if e.msg != 'bad input' or e.value != '=': - raise - tree = RTs._rtp.refactor_string(source, self.pathname) - # could optimise a bit for only doing str(tree) if - # getattr(tree, 'was_changed', False) returns True - return str(tree)[:-1] # remove added newline - - def load_module(self, fullname): - logger.debug('Running load_module for {0}...'.format(fullname)) - if fullname in sys.modules: - mod = sys.modules[fullname] - else: - if self.kind in (imp.PY_COMPILED, imp.C_EXTENSION, imp.C_BUILTIN, - imp.PY_FROZEN): - convert = False - # elif (self.pathname.startswith(_stdlibprefix) - # and 'site-packages' not in self.pathname): - # # We assume it's a stdlib package in this case. Is this too brittle? - # # Please file a bug report at https://github.com/PythonCharmers/python-future - # # if so. 
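# --- illustrative sketch, not part of the patch: Py2Fixer above implements
# the legacy PEP 302 finder/loader pair (find_module()/load_module()) that the
# import machinery honoured before Python 3.12. A stripped-down hook of the
# same shape, with hypothetical names; runs on Python <= 3.11 only:
import sys
import types

class DemoFinder(object):
    PY2FIXER = True                       # detection marker, as in the class above

    def find_module(self, fullname, path=None):
        return self if fullname == 'demo_mod' else None

    def load_module(self, fullname):
        # Register in sys.modules before executing, as PEP 302 requires.
        mod = sys.modules.setdefault(fullname, types.ModuleType(fullname))
        mod.__file__ = '<generated>'
        mod.__loader__ = self
        exec(compile('ANSWER = 42', mod.__file__, 'exec'), mod.__dict__)
        return mod

sys.meta_path.insert(0, DemoFinder())
import demo_mod                           # served by the hook
assert demo_mod.ANSWER == 42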
- # convert = False - # in theory, other paths could be configured to be excluded here too - elif any([fullname.startswith(path) for path in self.exclude_paths]): - convert = False - elif any([fullname.startswith(path) for path in self.include_paths]): - convert = True - else: - convert = False - if not convert: - logger.debug('Excluded {0} from translation'.format(fullname)) - mod = imp.load_module(fullname, *self.found) - else: - logger.debug('Autoconverting {0} ...'.format(fullname)) - mod = imp.new_module(fullname) - sys.modules[fullname] = mod - - # required by PEP 302 - mod.__file__ = self.pathname - mod.__name__ = fullname - mod.__loader__ = self - - # This: - # mod.__package__ = '.'.join(fullname.split('.')[:-1]) - # seems to result in "SystemError: Parent module '' not loaded, - # cannot perform relative import" for a package's __init__.py - # file. We use the approach below. Another option to try is the - # minimal load_module pattern from the PEP 302 text instead. - - # Is the test in the next line more or less robust than the - # following one? Presumably less ... - # ispkg = self.pathname.endswith('__init__.py') - - if self.kind == imp.PKG_DIRECTORY: - mod.__path__ = [ os.path.dirname(self.pathname) ] - mod.__package__ = fullname - else: - #else, regular module - mod.__path__ = [] - mod.__package__ = fullname.rpartition('.')[0] - - try: - cachename = imp.cache_from_source(self.pathname) - if not os.path.exists(cachename): - update_cache = True - else: - sourcetime = os.stat(self.pathname).st_mtime - cachetime = os.stat(cachename).st_mtime - update_cache = cachetime < sourcetime - # # Force update_cache to work around a problem with it being treated as Py3 code??? - # update_cache = True - if not update_cache: - with open(cachename, 'rb') as f: - data = f.read() - try: - code = marshal.loads(data) - except Exception: - # pyc could be corrupt. Regenerate it - update_cache = True - if update_cache: - if self.found[0]: - source = self.found[0].read() - elif self.kind == imp.PKG_DIRECTORY: - with open(self.pathname) as f: - source = f.read() - - if detect_python2(source, self.pathname): - source = self.transform(source) - - code = compile(source, self.pathname, 'exec') - - dirname = os.path.dirname(cachename) - try: - if not os.path.exists(dirname): - os.makedirs(dirname) - with open(cachename, 'wb') as f: - data = marshal.dumps(code) - f.write(data) - except Exception: # could be write-protected - pass - exec(code, mod.__dict__) - except Exception as e: - # must remove module from sys.modules - del sys.modules[fullname] - raise # keep it simple - - if self.found[0]: - self.found[0].close() - return mod - -_hook = Py2Fixer() - - -def install_hooks(include_paths=(), exclude_paths=()): - if isinstance(include_paths, str): - include_paths = (include_paths,) - if isinstance(exclude_paths, str): - exclude_paths = (exclude_paths,) - assert len(include_paths) + len(exclude_paths) > 0, 'Pass at least one argument' - _hook.include(include_paths) - _hook.exclude(exclude_paths) - # _hook.debug = debug - enable = sys.version_info[0] >= 3 # enabled for all 3.x+ - if enable and _hook not in sys.meta_path: - sys.meta_path.insert(0, _hook) # insert at beginning. This could be made a parameter - - # We could return the hook when there are ways of configuring it - #return _hook - - -def remove_hooks(): - if _hook in sys.meta_path: - sys.meta_path.remove(_hook) - - -def detect_hooks(): - """ - Returns True if the import hooks are installed, False if not. 
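# --- illustrative sketch, not part of the patch: the caching step in
# load_module() above, restated with the maintained importlib API (the deleted
# code uses the long-removed 'imp' module): compile once, marshal the code
# object to the __pycache__ path, and reuse it while the source is unchanged.
import importlib.util
import marshal
import os

def cached_compile(pathname):
    cachename = importlib.util.cache_from_source(pathname)
    if (os.path.exists(cachename)
            and os.stat(cachename).st_mtime >= os.stat(pathname).st_mtime):
        with open(cachename, 'rb') as f:
            return marshal.loads(f.read())     # reuse the cached code object
    with open(pathname, 'rb') as f:
        code = compile(f.read(), pathname, 'exec')
    os.makedirs(os.path.dirname(cachename), exist_ok=True)
    with open(cachename, 'wb') as f:
        f.write(marshal.dumps(code))           # may fail if write-protected
    return code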
- """ - return _hook in sys.meta_path - # present = any([hasattr(hook, 'PY2FIXER') for hook in sys.meta_path]) - # return present - - -class hooks(object): - """ - Acts as a context manager. Use like this: - - >>> from past import translation - >>> with translation.hooks(): - ... import mypy2module - >>> import requests # py2/3 compatible anyway - >>> # etc. - """ - def __enter__(self): - self.hooks_were_installed = detect_hooks() - install_hooks() - return self - - def __exit__(self, *args): - if not self.hooks_were_installed: - remove_hooks() - - -class suspend_hooks(object): - """ - Acts as a context manager. Use like this: - - >>> from past import translation - >>> translation.install_hooks() - >>> import http.client - >>> # ... - >>> with translation.suspend_hooks(): - >>> import requests # or others that support Py2/3 - - If the hooks were disabled before the context, they are not installed when - the context is left. - """ - def __enter__(self): - self.hooks_were_installed = detect_hooks() - remove_hooks() - return self - def __exit__(self, *args): - if self.hooks_were_installed: - install_hooks() - - -# alias -autotranslate = install_hooks diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/__init__.py deleted file mode 100755 index 91dd270..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Forward-ports of types from Python 2 for use with Python 3: - -- ``basestring``: equivalent to ``(str, bytes)`` in ``isinstance`` checks -- ``dict``: with list-producing .keys() etc. methods -- ``str``: bytes-like, but iterating over them doesn't product integers -- ``long``: alias of Py3 int with ``L`` suffix in the ``repr`` -- ``unicode``: alias of Py3 str with ``u`` prefix in the ``repr`` - -""" - -from past import utils - -if utils.PY2: - import __builtin__ - basestring = __builtin__.basestring - dict = __builtin__.dict - str = __builtin__.str - long = __builtin__.long - unicode = __builtin__.unicode - __all__ = [] -else: - from .basestring import basestring - from .olddict import olddict - from .oldstr import oldstr - long = int - unicode = str - # from .unicode import unicode - __all__ = ['basestring', 'olddict', 'oldstr', 'long', 'unicode'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/basestring.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/basestring.py deleted file mode 100755 index 1cab22f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/basestring.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -An implementation of the basestring type for Python 3 - -Example use: - ->>> s = b'abc' ->>> assert isinstance(s, basestring) ->>> from past.types import str as oldstr ->>> s2 = oldstr(b'abc') ->>> assert isinstance(s2, basestring) - -""" - -import sys - -from past.utils import with_metaclass, PY2 - -if PY2: - str = unicode - -ver = sys.version_info[:2] - - -class BaseBaseString(type): - def __instancecheck__(cls, instance): - return isinstance(instance, (bytes, str)) - - def __subclasshook__(cls, thing): - # TODO: What should go here? 
- raise NotImplemented - - -class basestring(with_metaclass(BaseBaseString)): - """ - A minimal backport of the Python 2 basestring type to Py3 - """ - - -__all__ = ['basestring'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/olddict.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/olddict.py deleted file mode 100755 index f4f92a2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/olddict.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -A dict subclass for Python 3 that behaves like Python 2's dict - -Example use: - ->>> from past.builtins import dict ->>> d1 = dict() # instead of {} for an empty dict ->>> d2 = dict(key1='value1', key2='value2') - -The keys, values and items methods now return lists on Python 3.x and there are -methods for iterkeys, itervalues, iteritems, and viewkeys etc. - ->>> for d in (d1, d2): -... assert isinstance(d.keys(), list) -... assert isinstance(d.values(), list) -... assert isinstance(d.items(), list) -""" - -import sys - -from past.utils import with_metaclass - - -_builtin_dict = dict -ver = sys.version_info[:2] - - -class BaseOldDict(type): - def __instancecheck__(cls, instance): - return isinstance(instance, _builtin_dict) - - -class olddict(with_metaclass(BaseOldDict, _builtin_dict)): - """ - A backport of the Python 3 dict object to Py2 - """ - iterkeys = _builtin_dict.keys - viewkeys = _builtin_dict.keys - - def keys(self): - return list(super(olddict, self).keys()) - - itervalues = _builtin_dict.values - viewvalues = _builtin_dict.values - - def values(self): - return list(super(olddict, self).values()) - - iteritems = _builtin_dict.items - viewitems = _builtin_dict.items - - def items(self): - return list(super(olddict, self).items()) - - def has_key(self, k): - """ - D.has_key(k) -> True if D has a key k, else False - """ - return k in self - - # def __new__(cls, *args, **kwargs): - # """ - # dict() -> new empty dictionary - # dict(mapping) -> new dictionary initialized from a mapping object's - # (key, value) pairs - # dict(iterable) -> new dictionary initialized as if via: - # d = {} - # for k, v in iterable: - # d[k] = v - # dict(**kwargs) -> new dictionary initialized with the name=value pairs - # in the keyword argument list. For example: dict(one=1, two=2) - - # """ - # - # if len(args) == 0: - # return super(olddict, cls).__new__(cls) - # # Was: elif isinstance(args[0], newbytes): - # # We use type() instead of the above because we're redefining - # # this to be True for all unicode string subclasses. Warning: - # # This may render newstr un-subclassable. - # elif type(args[0]) == olddict: - # return args[0] - # # elif isinstance(args[0], _builtin_dict): - # # value = args[0] - # else: - # value = args[0] - # return super(olddict, cls).__new__(cls, value) - - def __native__(self): - """ - Hook for the past.utils.native() function - """ - return super(oldbytes, self) - - -__all__ = ['olddict'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/oldstr.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/oldstr.py deleted file mode 100755 index a477d88..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/types/oldstr.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -Pure-Python implementation of a Python 2-like str object for Python 3. 
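# --- illustrative sketch, not part of the patch: the trick behind the
# basestring and olddict backports above is a metaclass __instancecheck__ that
# makes isinstance() answer for the Py2 type. Py3-only spelling here (the
# originals use with_metaclass() for dual-version support):
class BaseBaseString(type):
    def __instancecheck__(cls, instance):
        # isinstance(x, basestring) is true for both text and bytes.
        return isinstance(instance, (bytes, str))

class basestring(metaclass=BaseBaseString):
    pass

assert isinstance('abc', basestring)
assert isinstance(b'abc', basestring)
assert not isinstance(1, basestring)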
-""" - -from numbers import Integral - -from past.utils import PY2, with_metaclass - -if PY2: - from collections import Iterable -else: - from collections.abc import Iterable - -_builtin_bytes = bytes - - -class BaseOldStr(type): - def __instancecheck__(cls, instance): - return isinstance(instance, _builtin_bytes) - - -def unescape(s): - """ - Interprets strings with escape sequences - - Example: - >>> s = unescape(r'abc\\def') # i.e. 'abc\\\\def' - >>> print(s) - 'abc\def' - >>> s2 = unescape('abc\\ndef') - >>> len(s2) - 8 - >>> print(s2) - abc - def - """ - return s.encode().decode('unicode_escape') - - -class oldstr(with_metaclass(BaseOldStr, _builtin_bytes)): - """ - A forward port of the Python 2 8-bit string object to Py3 - """ - # Python 2 strings have no __iter__ method: - @property - def __iter__(self): - raise AttributeError - - def __dir__(self): - return [thing for thing in dir(_builtin_bytes) if thing != '__iter__'] - - # def __new__(cls, *args, **kwargs): - # """ - # From the Py3 bytes docstring: - - # bytes(iterable_of_ints) -> bytes - # bytes(string, encoding[, errors]) -> bytes - # bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer - # bytes(int) -> bytes object of size given by the parameter initialized with null bytes - # bytes() -> empty bytes object - # - # Construct an immutable array of bytes from: - # - an iterable yielding integers in range(256) - # - a text string encoded using the specified encoding - # - any object implementing the buffer API. - # - an integer - # """ - # - # if len(args) == 0: - # return super(newbytes, cls).__new__(cls) - # # Was: elif isinstance(args[0], newbytes): - # # We use type() instead of the above because we're redefining - # # this to be True for all unicode string subclasses. Warning: - # # This may render newstr un-subclassable. - # elif type(args[0]) == newbytes: - # return args[0] - # elif isinstance(args[0], _builtin_bytes): - # value = args[0] - # elif isinstance(args[0], unicode): - # if 'encoding' not in kwargs: - # raise TypeError('unicode string argument without an encoding') - # ### - # # Was: value = args[0].encode(**kwargs) - # # Python 2.6 string encode() method doesn't take kwargs: - # # Use this instead: - # newargs = [kwargs['encoding']] - # if 'errors' in kwargs: - # newargs.append(kwargs['errors']) - # value = args[0].encode(*newargs) - # ### - # elif isinstance(args[0], Iterable): - # if len(args[0]) == 0: - # # What is this? - # raise ValueError('unknown argument type') - # elif len(args[0]) > 0 and isinstance(args[0][0], Integral): - # # It's a list of integers - # value = b''.join([chr(x) for x in args[0]]) - # else: - # raise ValueError('item cannot be interpreted as an integer') - # elif isinstance(args[0], Integral): - # if args[0] < 0: - # raise ValueError('negative count') - # value = b'\x00' * args[0] - # else: - # value = args[0] - # return super(newbytes, cls).__new__(cls, value) - - def __repr__(self): - s = super(oldstr, self).__repr__() # e.g. b'abc' on Py3, b'abc' on Py3 - return s[1:] - - def __str__(self): - s = super(oldstr, self).__str__() # e.g. "b'abc'" or "b'abc\\ndef' - # TODO: fix this: - assert s[:2] == "b'" and s[-1] == "'" - return unescape(s[2:-1]) # e.g. 
'abc' or 'abc\ndef' - - def __getitem__(self, y): - if isinstance(y, Integral): - return super(oldstr, self).__getitem__(slice(y, y+1)) - else: - return super(oldstr, self).__getitem__(y) - - def __getslice__(self, *args): - return self.__getitem__(slice(*args)) - - def __contains__(self, key): - if isinstance(key, int): - return False - - def __native__(self): - return bytes(self) - - -__all__ = ['oldstr'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/utils/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/utils/__init__.py deleted file mode 100755 index f6b2642..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/past/utils/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Various non-built-in utility functions and definitions for Py2 -compatibility in Py3. - -For example: - - >>> # The old_div() function behaves like Python 2's / operator - >>> # without "from __future__ import division" - >>> from past.utils import old_div - >>> old_div(3, 2) # like 3/2 in Py2 - 0 - >>> old_div(3, 2.0) # like 3/2.0 in Py2 - 1.5 -""" - -import sys -import numbers - -PY3 = sys.version_info[0] >= 3 -PY2 = sys.version_info[0] == 2 -PYPY = hasattr(sys, 'pypy_translation_info') - - -def with_metaclass(meta, *bases): - """ - Function from jinja2/_compat.py. License: BSD. - - Use it like this:: - - class BaseForm(object): - pass - - class FormType(type): - pass - - class Form(with_metaclass(FormType, BaseForm)): - pass - - This requires a bit of explanation: the basic idea is to make a - dummy metaclass for one level of class instantiation that replaces - itself with the actual metaclass. Because of internal type checks - we also need to make sure that we downgrade the custom metaclass - for one level to something closer to type (that's why __call__ and - __init__ comes back from type etc.). - - This has the advantage over six.with_metaclass of not introducing - dummy classes into the final MRO. - """ - class metaclass(meta): - __call__ = type.__call__ - __init__ = type.__init__ - def __new__(cls, name, this_bases, d): - if this_bases is None: - return type.__new__(cls, name, (), d) - return meta(name, bases, d) - return metaclass('temporary_class', None, {}) - - -def native(obj): - """ - On Py2, this is a no-op: native(obj) -> obj - - On Py3, returns the corresponding native Py3 types that are - superclasses for forward-ported objects from Py2: - - >>> from past.builtins import str, dict - - >>> native(str(b'ABC')) # Output on Py3 follows. On Py2, output is 'ABC' - b'ABC' - >>> type(native(str(b'ABC'))) - bytes - - Existing native types on Py3 will be returned unchanged: - - >>> type(native(b'ABC')) - bytes - """ - if hasattr(obj, '__native__'): - return obj.__native__() - else: - return obj - - -# An alias for future.utils.old_div(): -def old_div(a, b): - """ - Equivalent to ``a / b`` on Python 2 without ``from __future__ import - division``. - - TODO: generalize this to other objects (like arrays etc.) 
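# --- illustrative sketch, not part of the patch: what with_metaclass() above
# buys you. The helper is repeated verbatim so the checks below are runnable;
# note that no 'temporary_class' is left behind in the MRO.
def with_metaclass(meta, *bases):
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})

class Meta(type):
    pass

class C(with_metaclass(Meta, object)):
    pass

assert type(C) is Meta
assert C.__mro__ == (C, object)   # the dummy class does not appear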
- """ - if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral): - return a // b - else: - return a / b - -__all__ = ['PY3', 'PY2', 'PYPY', 'with_metaclass', 'native', 'old_div'] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/__init__.py deleted file mode 100755 index 852476e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/__init__.py +++ /dev/null @@ -1,3283 +0,0 @@ -""" -Package resource API --------------------- - -A resource is a logical file contained within a package, or a logical -subdirectory thereof. The package resource API expects resource names -to have their path parts separated with ``/``, *not* whatever the local -path separator is. Do not use os.path operations to manipulate resource -names being passed into the API. - -The package resource API is designed to work with normal filesystem packages, -.egg files, and unpacked .egg files. It can also work in a limited way with -.zip files and with custom PEP 302 loaders that support the ``get_data()`` -method. -""" - -import sys -import os -import io -import time -import re -import types -import zipfile -import zipimport -import warnings -import stat -import functools -import pkgutil -import operator -import platform -import collections -import plistlib -import email.parser -import errno -import tempfile -import textwrap -import itertools -import inspect -import ntpath -import posixpath -import importlib -from pkgutil import get_importer - -try: - import _imp -except ImportError: - # Python 3.2 compatibility - import imp as _imp - -try: - FileExistsError -except NameError: - FileExistsError = OSError - -# capture these to bypass sandboxing -from os import utime -try: - from os import mkdir, rename, unlink - WRITE_SUPPORT = True -except ImportError: - # no write support, probably under GAE - WRITE_SUPPORT = False - -from os import open as os_open -from os.path import isdir, split - -try: - import importlib.machinery as importlib_machinery - # access attribute to force import under delayed import mechanisms. - importlib_machinery.__name__ -except ImportError: - importlib_machinery = None - -from pkg_resources.extern.jaraco.text import ( - yield_lines, - drop_comment, - join_continuation, -) - -from pkg_resources.extern import appdirs -from pkg_resources.extern import packaging -__import__('pkg_resources.extern.packaging.version') -__import__('pkg_resources.extern.packaging.specifiers') -__import__('pkg_resources.extern.packaging.requirements') -__import__('pkg_resources.extern.packaging.markers') - -if sys.version_info < (3, 5): - raise RuntimeError("Python 3.5 or later is required") - -# declare some globals that will be defined later to -# satisfy the linters. -require = None -working_set = None -add_activation_listener = None -resources_stream = None -cleanup_resources = None -resource_dir = None -resource_stream = None -set_extraction_path = None -resource_isdir = None -resource_string = None -iter_entry_points = None -resource_listdir = None -resource_filename = None -resource_exists = None -_distribution_finders = None -_namespace_handlers = None -_namespace_packages = None - - -class PEP440Warning(RuntimeWarning): - """ - Used when there is an issue with a version or specifier not complying with - PEP 440. 
- """ - - -def parse_version(v): - try: - return packaging.version.Version(v) - except packaging.version.InvalidVersion: - warnings.warn( - f"{v} is an invalid version and will not be supported in " - "a future release", - PkgResourcesDeprecationWarning, - ) - return packaging.version.LegacyVersion(v) - - -_state_vars = {} - - -def _declare_state(vartype, **kw): - globals().update(kw) - _state_vars.update(dict.fromkeys(kw, vartype)) - - -def __getstate__(): - state = {} - g = globals() - for k, v in _state_vars.items(): - state[k] = g['_sget_' + v](g[k]) - return state - - -def __setstate__(state): - g = globals() - for k, v in state.items(): - g['_sset_' + _state_vars[k]](k, g[k], v) - return state - - -def _sget_dict(val): - return val.copy() - - -def _sset_dict(key, ob, state): - ob.clear() - ob.update(state) - - -def _sget_object(val): - return val.__getstate__() - - -def _sset_object(key, ob, state): - ob.__setstate__(state) - - -_sget_none = _sset_none = lambda *args: None - - -def get_supported_platform(): - """Return this platform's maximum compatible version. - - distutils.util.get_platform() normally reports the minimum version - of macOS that would be required to *use* extensions produced by - distutils. But what we want when checking compatibility is to know the - version of macOS that we are *running*. To allow usage of packages that - explicitly require a newer version of macOS, we must also know the - current version of the OS. - - If this condition occurs for any other platform with a version in its - platform strings, this function should be extended accordingly. - """ - plat = get_build_platform() - m = macosVersionString.match(plat) - if m is not None and sys.platform == "darwin": - try: - plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3)) - except ValueError: - # not macOS - pass - return plat - - -__all__ = [ - # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', - 'load_entry_point', 'get_entry_map', 'get_entry_info', - 'iter_entry_points', - 'resource_string', 'resource_stream', 'resource_filename', - 'resource_listdir', 'resource_exists', 'resource_isdir', - - # Environmental control - 'declare_namespace', 'working_set', 'add_activation_listener', - 'find_distributions', 'set_extraction_path', 'cleanup_resources', - 'get_default_cache', - - # Primary implementation classes - 'Environment', 'WorkingSet', 'ResourceManager', - 'Distribution', 'Requirement', 'EntryPoint', - - # Exceptions - 'ResolutionError', 'VersionConflict', 'DistributionNotFound', - 'UnknownExtra', 'ExtractionError', - - # Warnings - 'PEP440Warning', - - # Parsing functions and string utilities - 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', - 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', - 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', - - # filesystem utilities - 'ensure_directory', 'normalize_path', - - # Distribution "precedence" constants - 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', - - # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', - 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', - 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', - 'register_finder', 'register_namespace_handler', 'register_loader_type', - 'fixup_namespace_packages', 'get_importer', - - # Warnings - 
'PkgResourcesDeprecationWarning', - - # Deprecated/backward compatibility only - 'run_main', 'AvailableDistributions', -] - - -class ResolutionError(Exception): - """Abstract base for dependency resolution errors""" - - def __repr__(self): - return self.__class__.__name__ + repr(self.args) - - -class VersionConflict(ResolutionError): - """ - An already-installed version conflicts with the requested version. - - Should be initialized with the installed Distribution and the requested - Requirement. - """ - - _template = "{self.dist} is installed but {self.req} is required" - - @property - def dist(self): - return self.args[0] - - @property - def req(self): - return self.args[1] - - def report(self): - return self._template.format(**locals()) - - def with_context(self, required_by): - """ - If required_by is non-empty, return a version of self that is a - ContextualVersionConflict. - """ - if not required_by: - return self - args = self.args + (required_by,) - return ContextualVersionConflict(*args) - - -class ContextualVersionConflict(VersionConflict): - """ - A VersionConflict that accepts a third parameter, the set of the - requirements that required the installed Distribution. - """ - - _template = VersionConflict._template + ' by {self.required_by}' - - @property - def required_by(self): - return self.args[2] - - -class DistributionNotFound(ResolutionError): - """A requested distribution was not found""" - - _template = ("The '{self.req}' distribution was not found " - "and is required by {self.requirers_str}") - - @property - def req(self): - return self.args[0] - - @property - def requirers(self): - return self.args[1] - - @property - def requirers_str(self): - if not self.requirers: - return 'the application' - return ', '.join(self.requirers) - - def report(self): - return self._template.format(**locals()) - - def __str__(self): - return self.report() - - -class UnknownExtra(ResolutionError): - """Distribution doesn't have an "extra feature" of the given name""" - - -_provider_factories = {} - -PY_MAJOR = '{}.{}'.format(*sys.version_info) -EGG_DIST = 3 -BINARY_DIST = 2 -SOURCE_DIST = 1 -CHECKOUT_DIST = 0 -DEVELOP_DIST = -1 - - -def register_loader_type(loader_type, provider_factory): - """Register `provider_factory` to make providers for `loader_type` - - `loader_type` is the type or class of a PEP 302 ``module.__loader__``, - and `provider_factory` is a function that, passed a *module* object, - returns an ``IResourceProvider`` for that module. 
- """ - _provider_factories[loader_type] = provider_factory - - -def get_provider(moduleOrReq): - """Return an IResourceProvider for the named module or requirement""" - if isinstance(moduleOrReq, Requirement): - return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] - try: - module = sys.modules[moduleOrReq] - except KeyError: - __import__(moduleOrReq) - module = sys.modules[moduleOrReq] - loader = getattr(module, '__loader__', None) - return _find_adapter(_provider_factories, loader)(module) - - -def _macos_vers(_cache=[]): - if not _cache: - version = platform.mac_ver()[0] - # fallback for MacPorts - if version == '': - plist = '/System/Library/CoreServices/SystemVersion.plist' - if os.path.exists(plist): - if hasattr(plistlib, 'readPlist'): - plist_content = plistlib.readPlist(plist) - if 'ProductVersion' in plist_content: - version = plist_content['ProductVersion'] - - _cache.append(version.split('.')) - return _cache[0] - - -def _macos_arch(machine): - return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) - - -def get_build_platform(): - """Return this platform's string for platform-specific distributions - - XXX Currently this is the same as ``distutils.util.get_platform()``, but it - needs some hacks for Linux and macOS. - """ - from sysconfig import get_platform - - plat = get_platform() - if sys.platform == "darwin" and not plat.startswith('macosx-'): - try: - version = _macos_vers() - machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % ( - int(version[0]), int(version[1]), - _macos_arch(machine), - ) - except ValueError: - # if someone is running a non-Mac darwin system, this will fall - # through to the default implementation - pass - return plat - - -macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") -darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") -# XXX backward compat -get_platform = get_build_platform - - -def compatible_platforms(provided, required): - """Can code for the `provided` platform run on the `required` platform? - - Returns true if either platform is ``None``, or the platforms are equal. - - XXX Needs compatibility checks for Linux and other unixy OSes. - """ - if provided is None or required is None or provided == required: - # easy case - return True - - # macOS special cases - reqMac = macosVersionString.match(required) - if reqMac: - provMac = macosVersionString.match(provided) - - # is this a Mac package? - if not provMac: - # this is backwards compatibility for packages built before - # setuptools 0.6. All packages built after this point will - # use the new macOS designation. - provDarwin = darwinVersionString.match(provided) - if provDarwin: - dversion = int(provDarwin.group(1)) - macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if dversion == 7 and macosversion >= "10.3" or \ - dversion == 8 and macosversion >= "10.4": - return True - # egg isn't macOS or legacy darwin - return False - - # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or \ - provMac.group(3) != reqMac.group(3): - return False - - # is the required OS major update >= the provided one? 
- if int(provMac.group(2)) > int(reqMac.group(2)): - return False - - return True - - # XXX Linux and other platforms' special cases should go here - return False - - -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - - -# backward compatibility -run_main = run_script - - -def get_distribution(dist): - """Return a current distribution object for a Requirement or string""" - if isinstance(dist, str): - dist = Requirement.parse(dist) - if isinstance(dist, Requirement): - dist = get_provider(dist) - if not isinstance(dist, Distribution): - raise TypeError("Expected string, Requirement, or Distribution", dist) - return dist - - -def load_entry_point(dist, group, name): - """Return `name` entry point of `group` for `dist` or raise ImportError""" - return get_distribution(dist).load_entry_point(group, name) - - -def get_entry_map(dist, group=None): - """Return the entry point map for `group`, or the full entry map""" - return get_distribution(dist).get_entry_map(group) - - -def get_entry_info(dist, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return get_distribution(dist).get_entry_info(group, name) - - -class IMetadataProvider: - def has_metadata(name): - """Does the package's distribution contain the named metadata?""" - - def get_metadata(name): - """The named metadata resource as a string""" - - def get_metadata_lines(name): - """Yield named metadata resource as list of non-blank non-comment lines - - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" - - def metadata_isdir(name): - """Is the named metadata a directory? (like ``os.path.isdir()``)""" - - def metadata_listdir(name): - """List of metadata names in the directory (like ``os.listdir()``)""" - - def run_script(script_name, namespace): - """Execute the named script in the supplied namespace dictionary""" - - -class IResourceProvider(IMetadataProvider): - """An object that provides access to package resources""" - - def get_resource_filename(manager, resource_name): - """Return a true filesystem path for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_stream(manager, resource_name): - """Return a readable file-like object for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_string(manager, resource_name): - """Return a string containing the contents of `resource_name` - - `manager` must be an ``IResourceManager``""" - - def has_resource(resource_name): - """Does the package contain the named resource?""" - - def resource_isdir(resource_name): - """Is the named resource a directory? (like ``os.path.isdir()``)""" - - def resource_listdir(resource_name): - """List of resource names in the directory (like ``os.listdir()``)""" - - -class WorkingSet: - """A collection of active distributions on sys.path (or a similar list)""" - - def __init__(self, entries=None): - """Create working set from list of path entries (default=sys.path)""" - self.entries = [] - self.entry_keys = {} - self.by_key = {} - self.callbacks = [] - - if entries is None: - entries = sys.path - - for entry in entries: - self.add_entry(entry) - - @classmethod - def _build_master(cls): - """ - Prepare the master working set. 
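# --- usage sketch, not part of the patch: the distribution/entry-point lookup
# helpers above in use; 'pip' and 'console_scripts' are examples and assume
# pip is installed in the environment.
import pkg_resources

dist = pkg_resources.get_distribution('pip')
print(dist.project_name, dist.version)

ep = pkg_resources.get_entry_info('pip', 'console_scripts', 'pip')
print(ep)   # the EntryPoint behind the 'pip' console script, or None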
- """ - ws = cls() - try: - from __main__ import __requires__ - except ImportError: - # The main program does not list any requirements - return ws - - # ensure the requirements are met - try: - ws.require(__requires__) - except VersionConflict: - return cls._build_from_requirements(__requires__) - - return ws - - @classmethod - def _build_from_requirements(cls, req_spec): - """ - Build a working set from a requirement spec. Rewrites sys.path. - """ - # try it without defaults already on sys.path - # by starting with an empty path - ws = cls([]) - reqs = parse_requirements(req_spec) - dists = ws.resolve(reqs, Environment()) - for dist in dists: - ws.add(dist) - - # add any missing entries from sys.path - for entry in sys.path: - if entry not in ws.entries: - ws.add_entry(entry) - - # then copy back to sys.path - sys.path[:] = ws.entries - return ws - - def add_entry(self, entry): - """Add a path item to ``.entries``, finding any distributions on it - - ``find_distributions(entry, True)`` is used to find distributions - corresponding to the path entry, and they are added. `entry` is - always appended to ``.entries``, even if it is already present. - (This is because ``sys.path`` can contain the same value more than - once, and the ``.entries`` of the ``sys.path`` WorkingSet should always - equal ``sys.path``.) - """ - self.entry_keys.setdefault(entry, []) - self.entries.append(entry) - for dist in find_distributions(entry, True): - self.add(dist, entry, False) - - def __contains__(self, dist): - """True if `dist` is the active distribution for its project""" - return self.by_key.get(dist.key) == dist - - def find(self, req): - """Find a distribution matching requirement `req` - - If there is an active distribution for the requested project, this - returns it as long as it meets the version requirement specified by - `req`. But, if there is an active distribution for the project and it - does *not* meet the `req` requirement, ``VersionConflict`` is raised. - If there is no active distribution for the requested project, ``None`` - is returned. - """ - dist = self.by_key.get(req.key) - if dist is not None and dist not in req: - # XXX add more info - raise VersionConflict(dist, req) - return dist - - def iter_entry_points(self, group, name=None): - """Yield entry point objects from `group` matching `name` - - If `name` is None, yields all entry points in `group` from all - distributions in the working set, otherwise only ones matching - both `group` and `name` are yielded (in distribution order). - """ - return ( - entry - for dist in self - for entry in dist.get_entry_map(group).values() - if name is None or name == entry.name - ) - - def run_script(self, requires, script_name): - """Locate distribution for `requires` and run `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - self.require(requires)[0].run_script(script_name, ns) - - def __iter__(self): - """Yield distributions for non-duplicate projects in the working set - - The yield order is the order in which the items' path entries were - added to the working set. 
- """ - seen = {} - for item in self.entries: - if item not in self.entry_keys: - # workaround a cache issue - continue - - for key in self.entry_keys[item]: - if key not in seen: - seen[key] = 1 - yield self.by_key[key] - - def add(self, dist, entry=None, insert=True, replace=False): - """Add `dist` to working set, associated with `entry` - - If `entry` is unspecified, it defaults to the ``.location`` of `dist`. - On exit from this routine, `entry` is added to the end of the working - set's ``.entries`` (if it wasn't already present). - - `dist` is only added to the working set if it's for a project that - doesn't already have a distribution in the set, unless `replace=True`. - If it's added, any callbacks registered with the ``subscribe()`` method - will be called. - """ - if insert: - dist.insert_on(self.entries, entry, replace=replace) - - if entry is None: - entry = dist.location - keys = self.entry_keys.setdefault(entry, []) - keys2 = self.entry_keys.setdefault(dist.location, []) - if not replace and dist.key in self.by_key: - # ignore hidden distros - return - - self.by_key[dist.key] = dist - if dist.key not in keys: - keys.append(dist.key) - if dist.key not in keys2: - keys2.append(dist.key) - self._added_new(dist) - - # FIXME: 'WorkingSet.resolve' is too complex (11) - def resolve(self, requirements, env=None, installer=None, # noqa: C901 - replace_conflicting=False, extras=None): - """List all distributions needed to (recursively) meet `requirements` - - `requirements` must be a sequence of ``Requirement`` objects. `env`, - if supplied, should be an ``Environment`` instance. If - not supplied, it defaults to all distributions available within any - entry or distribution in the working set. `installer`, if supplied, - will be invoked with each requirement that cannot be met by an - already-installed distribution; it should return a ``Distribution`` or - ``None``. - - Unless `replace_conflicting=True`, raises a VersionConflict exception - if - any requirements are found on the path that have the correct name but - the wrong version. Otherwise, if an `installer` is supplied it will be - invoked to obtain the correct version of the requirement and activate - it. - - `extras` is a list of the extras to be used with these requirements. - This is important because extra requirements may look like `my_req; - extra = "my_extra"`, which would otherwise be interpreted as a purely - optional requirement. Instead, we want to be able to assert that these - requirements are truly required. - """ - - # set up the stack - requirements = list(requirements)[::-1] - # set of processed requirements - processed = {} - # key -> dist - best = {} - to_activate = [] - - req_extras = _ReqExtras() - - # Mapping of requirement to set of distributions that required it; - # useful for reporting info about conflicts. 
- required_by = collections.defaultdict(set) - - while requirements: - # process dependencies breadth-first - req = requirements.pop(0) - if req in processed: - # Ignore cyclic or redundant dependencies - continue - - if not req_extras.markers_pass(req, extras): - continue - - dist = best.get(req.key) - if dist is None: - # Find the best distribution and add it to the map - dist = self.by_key.get(req.key) - if dist is None or (dist not in req and replace_conflicting): - ws = self - if env is None: - if dist is None: - env = Environment(self.entries) - else: - # Use an empty environment and workingset to avoid - # any further conflicts with the conflicting - # distribution - env = Environment([]) - ws = WorkingSet([]) - dist = best[req.key] = env.best_match( - req, ws, installer, - replace_conflicting=replace_conflicting - ) - if dist is None: - requirers = required_by.get(req, None) - raise DistributionNotFound(req, requirers) - to_activate.append(dist) - if dist not in req: - # Oops, the "best" so far conflicts with a dependency - dependent_req = required_by[req] - raise VersionConflict(dist, req).with_context(dependent_req) - - # push the new requirements onto the stack - new_requirements = dist.requires(req.extras)[::-1] - requirements.extend(new_requirements) - - # Register the new requirements needed by req - for new_requirement in new_requirements: - required_by[new_requirement].add(req.project_name) - req_extras[new_requirement] = req.extras - - processed[req] = True - - # return list of distros to activate - return to_activate - - def find_plugins( - self, plugin_env, full_env=None, installer=None, fallback=True): - """Find all activatable distributions in `plugin_env` - - Example usage:: - - distributions, errors = working_set.find_plugins( - Environment(plugin_dirlist) - ) - # add plugins+libs to sys.path - map(working_set.add, distributions) - # display errors - print('Could not load', errors) - - The `plugin_env` should be an ``Environment`` instance that contains - only distributions that are in the project's "plugin directory" or - directories. The `full_env`, if supplied, should be an ``Environment`` - contains all currently-available distributions. If `full_env` is not - supplied, one is created automatically from the ``WorkingSet`` this - method is called on, which will typically mean that every directory on - ``sys.path`` will be scanned for distributions. - - `installer` is a standard installer callback as used by the - ``resolve()`` method. The `fallback` flag indicates whether we should - attempt to resolve older versions of a plugin if the newest version - cannot be resolved. - - This method returns a 2-tuple: (`distributions`, `error_info`), where - `distributions` is a list of the distributions found in `plugin_env` - that were loadable, along with any other distributions that are needed - to resolve their dependencies. `error_info` is a dictionary mapping - unloadable plugin distributions to an exception instance describing the - error that occurred. Usually this will be a ``DistributionNotFound`` or - ``VersionConflict`` instance. 
- """ - - plugin_projects = list(plugin_env) - # scan project names in alphabetic order - plugin_projects.sort() - - error_info = {} - distributions = {} - - if full_env is None: - env = Environment(self.entries) - env += plugin_env - else: - env = full_env + plugin_env - - shadow_set = self.__class__([]) - # put all our entries in shadow_set - list(map(shadow_set.add, self)) - - for project_name in plugin_projects: - - for dist in plugin_env[project_name]: - - req = [dist.as_requirement()] - - try: - resolvees = shadow_set.resolve(req, env, installer) - - except ResolutionError as v: - # save error info - error_info[dist] = v - if fallback: - # try the next older version of project - continue - else: - # give up on this project, keep going - break - - else: - list(map(shadow_set.add, resolvees)) - distributions.update(dict.fromkeys(resolvees)) - - # success, no need to try any more versions of this project - break - - distributions = list(distributions) - distributions.sort() - - return distributions, error_info - - def require(self, *requirements): - """Ensure that distributions matching `requirements` are activated - - `requirements` must be a string or a (possibly-nested) sequence - thereof, specifying the distributions and versions required. The - return value is a sequence of the distributions that needed to be - activated to fulfill the requirements; all relevant distributions are - included, even if they were already activated in this working set. - """ - needed = self.resolve(parse_requirements(requirements)) - - for dist in needed: - self.add(dist) - - return needed - - def subscribe(self, callback, existing=True): - """Invoke `callback` for all distributions - - If `existing=True` (default), - call on all existing ones, as well. - """ - if callback in self.callbacks: - return - self.callbacks.append(callback) - if not existing: - return - for dist in self: - callback(dist) - - def _added_new(self, dist): - for callback in self.callbacks: - callback(dist) - - def __getstate__(self): - return ( - self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.callbacks[:] - ) - - def __setstate__(self, e_k_b_c): - entries, keys, by_key, callbacks = e_k_b_c - self.entries = entries[:] - self.entry_keys = keys.copy() - self.by_key = by_key.copy() - self.callbacks = callbacks[:] - - -class _ReqExtras(dict): - """ - Map each requirement to the extras that demanded it. - """ - - def markers_pass(self, req, extras=None): - """ - Evaluate markers for req against each extra that - demanded it. - - Return False if the req has a marker and fails - evaluation. Otherwise, return True. - """ - extra_evals = ( - req.marker.evaluate({'extra': extra}) - for extra in self.get(req, ()) + (extras or (None,)) - ) - return not req.marker or any(extra_evals) - - -class Environment: - """Searchable snapshot of distributions on a search path""" - - def __init__( - self, search_path=None, platform=get_supported_platform(), - python=PY_MAJOR): - """Snapshot distributions available on a search path - - Any distributions found on `search_path` are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. - - `platform` is an optional string specifying the name of the platform - that platform-specific distributions must be compatible with. If - unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'3.6'``); - it defaults to the current version. 
- - You may explicitly set `platform` (and/or `python`) to ``None`` if you - wish to map *all* distributions, not just those compatible with the - running platform or Python version. - """ - self._distmap = {} - self.platform = platform - self.python = python - self.scan(search_path) - - def can_add(self, dist): - """Is distribution `dist` acceptable for this environment? - - The distribution must match the platform and python version - requirements specified when this environment was created, or False - is returned. - """ - py_compat = ( - self.python is None - or dist.py_version is None - or dist.py_version == self.python - ) - return py_compat and compatible_platforms(dist.platform, self.platform) - - def remove(self, dist): - """Remove `dist` from the environment""" - self._distmap[dist.key].remove(dist) - - def scan(self, search_path=None): - """Scan `search_path` for distributions usable in this environment - - Any distributions found are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. Only distributions conforming to - the platform/python version defined at initialization are added. - """ - if search_path is None: - search_path = sys.path - - for item in search_path: - for dist in find_distributions(item): - self.add(dist) - - def __getitem__(self, project_name): - """Return a newest-to-oldest list of distributions for `project_name` - - Uses case-insensitive `project_name` comparison, assuming all the - project's distributions use their project's name converted to all - lowercase as their key. - - """ - distribution_key = project_name.lower() - return self._distmap.get(distribution_key, []) - - def add(self, dist): - """Add `dist` if we ``can_add()`` it and it has not already been added - """ - if self.can_add(dist) and dist.has_version(): - dists = self._distmap.setdefault(dist.key, []) - if dist not in dists: - dists.append(dist) - dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - - def best_match( - self, req, working_set, installer=None, replace_conflicting=False): - """Find distribution best matching `req` and usable on `working_set` - - This calls the ``find(req)`` method of the `working_set` to see if a - suitable distribution is already active. (This may raise - ``VersionConflict`` if an unsuitable version of the project is already - active in the specified `working_set`.) If a suitable distribution - isn't active, this method returns the newest distribution in the - environment that meets the ``Requirement`` in `req`. If no suitable - distribution is found, and `installer` is supplied, then the result of - calling the environment's ``obtain(req, installer)`` method will be - returned. - """ - try: - dist = working_set.find(req) - except VersionConflict: - if not replace_conflicting: - raise - dist = None - if dist is not None: - return dist - for dist in self[req.key]: - if dist in req: - return dist - # try to download/install - return self.obtain(req, installer) - - def obtain(self, requirement, installer=None): - """Obtain a distribution matching `requirement` (e.g. via download) - - Obtain a distro that matches requirement (e.g. via download). In the - base ``Environment`` class, this routine just returns - ``installer(requirement)``, unless `installer` is None, in which case - None is returned instead. 
This method is a hook that allows subclasses - to attempt other ways of obtaining a distribution before falling back - to the `installer` argument.""" - if installer is not None: - return installer(requirement) - - def __iter__(self): - """Yield the unique project names of the available distributions""" - for key in self._distmap.keys(): - if self[key]: - yield key - - def __iadd__(self, other): - """In-place addition of a distribution or environment""" - if isinstance(other, Distribution): - self.add(other) - elif isinstance(other, Environment): - for project in other: - for dist in other[project]: - self.add(dist) - else: - raise TypeError("Can't add %r to environment" % (other,)) - return self - - def __add__(self, other): - """Add an environment or distribution to an environment""" - new = self.__class__([], platform=None, python=None) - for env in self, other: - new += env - return new - - -# XXX backward compatibility -AvailableDistributions = Environment - - -class ExtractionError(RuntimeError): - """An error occurred extracting a resource - - The following attributes are available from instances of this exception: - - manager - The resource manager that raised this exception - - cache_path - The base directory for resource extraction - - original_error - The exception instance that caused extraction to fail - """ - - -class ResourceManager: - """Manage resource extraction and packages""" - extraction_path = None - - def __init__(self): - self.cached_files = {} - - def resource_exists(self, package_or_requirement, resource_name): - """Does the named resource exist?""" - return get_provider(package_or_requirement).has_resource(resource_name) - - def resource_isdir(self, package_or_requirement, resource_name): - """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir( - resource_name - ) - - def resource_filename(self, package_or_requirement, resource_name): - """Return a true filesystem path for specified resource""" - return get_provider(package_or_requirement).get_resource_filename( - self, resource_name - ) - - def resource_stream(self, package_or_requirement, resource_name): - """Return a readable file-like object for specified resource""" - return get_provider(package_or_requirement).get_resource_stream( - self, resource_name - ) - - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" - return get_provider(package_or_requirement).get_resource_string( - self, resource_name - ) - - def resource_listdir(self, package_or_requirement, resource_name): - """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir( - resource_name - ) - - def extraction_error(self): - """Give an error message for problems extracting file(s)""" - - old_exc = sys.exc_info()[1] - cache_path = self.extraction_path or get_default_cache() - - tmpl = textwrap.dedent(""" - Can't extract file(s) to egg cache - - The following error occurred while trying to extract file(s) - to the Python egg cache: - - {old_exc} - - The Python egg cache directory is currently set to: - - {cache_path} - - Perhaps your account does not have write access to this directory? - You can change the cache directory by setting the PYTHON_EGG_CACHE - environment variable to point to an accessible directory. 
- """).lstrip() - err = ExtractionError(tmpl.format(**locals())) - err.manager = self - err.cache_path = cache_path - err.original_error = old_exc - raise err - - def get_cache_path(self, archive_name, names=()): - """Return absolute location in cache for `archive_name` and `names` - - The parent directory of the resulting path will be created if it does - not already exist. `archive_name` should be the base filename of the - enclosing egg (which may not be the name of the enclosing zipfile!), - including its ".egg" extension. `names`, if provided, should be a - sequence of path name parts "under" the egg's extraction location. - - This method should only be called by resource providers that need to - obtain an extraction location, and only for names they intend to - extract, as it tracks the generated names for possible cleanup later. - """ - extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name + '-tmp', *names) - try: - _bypass_ensure_directory(target_path) - except Exception: - self.extraction_error() - - self._warn_unsafe_extraction_path(extract_path) - - self.cached_files[target_path] = 1 - return target_path - - @staticmethod - def _warn_unsafe_extraction_path(path): - """ - If the default extraction path is overridden and set to an insecure - location, such as /tmp, it opens up an opportunity for an attacker to - replace an extracted file with an unauthorized payload. Warn the user - if a known insecure location is used. - - See Distribute #375 for more details. - """ - if os.name == 'nt' and not path.startswith(os.environ['windir']): - # On Windows, permissions are generally restrictive by default - # and temp directories are not writable by other users, so - # bypass the warning. - return - mode = os.stat(path).st_mode - if mode & stat.S_IWOTH or mode & stat.S_IWGRP: - msg = ( - "Extraction path is writable by group/others " - "and vulnerable to attack when " - "used with get_resource_filename ({path}). " - "Consider a more secure " - "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." - ).format(**locals()) - warnings.warn(msg, UserWarning) - - def postprocess(self, tempname, filename): - """Perform any platform-specific postprocessing of `tempname` - - This is where Mac header rewrites should be done; other platforms don't - have anything special they should do. - - Resource providers should call this method ONLY after successfully - extracting a compressed resource. They must NOT call it on resources - that are already in the filesystem. - - `tempname` is the current (temporary) name of the file, and `filename` - is the name it will be renamed to by the caller after this routine - returns. - """ - - if os.name == 'posix': - # Make the resource executable - mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 - os.chmod(tempname, mode) - - def set_extraction_path(self, path): - """Set the base path where resources will be extracted to, if needed. - - If you do not call this routine before any extractions take place, the - path defaults to the return value of ``get_default_cache()``. (Which - is based on the ``PYTHON_EGG_CACHE`` environment variable, with various - platform-specific fallbacks. See that routine's documentation for more - details.) - - Resources are extracted to subdirectories of this path based upon - information given by the ``IResourceProvider``. 
You may set this to a - temporary directory, but then you must call ``cleanup_resources()`` to - delete the extracted files when done. There is no guarantee that - ``cleanup_resources()`` will be able to remove all extracted files. - - (Note: you may not change the extraction path for a given resource - manager once resources have been extracted, unless you first call - ``cleanup_resources()``.) - """ - if self.cached_files: - raise ValueError( - "Can't change extraction path, files already extracted" - ) - - self.extraction_path = path - - def cleanup_resources(self, force=False): - """ - Delete all extracted resource files and directories, returning a list - of the file and directory names that could not be successfully removed. - This function does not have any concurrency protection, so it should - generally only be called when the extraction path is a temporary - directory exclusive to a single process. This method is not - automatically called; you must call it explicitly or register it as an - ``atexit`` function if you wish to ensure cleanup of a temporary - directory used for extractions. - """ - # XXX - - -def get_default_cache(): - """ - Return the ``PYTHON_EGG_CACHE`` environment variable - or a platform-relevant user cache dir for an app - named "Python-Eggs". - """ - return ( - os.environ.get('PYTHON_EGG_CACHE') - or appdirs.user_cache_dir(appname='Python-Eggs') - ) - - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """ - Convert an arbitrary string to a standard version string - """ - try: - # normalize the version - return str(packaging.version.Version(version)) - except packaging.version.InvalidVersion: - version = version.replace(' ', '.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def safe_extra(extra): - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - """ - return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. - """ - return name.replace('-', '_') - - -def invalid_marker(text): - """ - Validate text as a PEP 508 environment marker; return an exception - if invalid or False otherwise. - """ - try: - evaluate_marker(text) - except SyntaxError as e: - e.filename = None - e.lineno = None - return e - return False - - -def evaluate_marker(text, extra=None): - """ - Evaluate a PEP 508 environment marker. - Return a boolean indicating the marker result in this environment. - Raise SyntaxError if marker is invalid. - - This implementation uses the 'pyparsing' module. 
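For reference, a minimal sketch of how the two marker helpers above behave; the marker strings are illustrative.

# Illustrative sketch only; not part of the original patch hunk.
from pkg_resources import evaluate_marker, invalid_marker

evaluate_marker('python_version >= "2.7"')   # True on any supported interpreter
invalid_marker('python_version >= "2.7"')    # False: the marker is valid
invalid_marker('python_version >>> "2.7"')   # a SyntaxError instance, returned not raised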
- """ - try: - marker = packaging.markers.Marker(text) - return marker.evaluate() - except packaging.markers.InvalidMarker as e: - raise SyntaxError(e) from e - - -class NullProvider: - """Try to implement resources and metadata for arbitrary PEP 302 loaders""" - - egg_name = None - egg_info = None - loader = None - - def __init__(self, module): - self.loader = getattr(module, '__loader__', None) - self.module_path = os.path.dirname(getattr(module, '__file__', '')) - - def get_resource_filename(self, manager, resource_name): - return self._fn(self.module_path, resource_name) - - def get_resource_stream(self, manager, resource_name): - return io.BytesIO(self.get_resource_string(manager, resource_name)) - - def get_resource_string(self, manager, resource_name): - return self._get(self._fn(self.module_path, resource_name)) - - def has_resource(self, resource_name): - return self._has(self._fn(self.module_path, resource_name)) - - def _get_metadata_path(self, name): - return self._fn(self.egg_info, name) - - def has_metadata(self, name): - if not self.egg_info: - return self.egg_info - - path = self._get_metadata_path(name) - return self._has(path) - - def get_metadata(self, name): - if not self.egg_info: - return "" - path = self._get_metadata_path(name) - value = self._get(path) - try: - return value.decode('utf-8') - except UnicodeDecodeError as exc: - # Include the path in the error message to simplify - # troubleshooting, and without changing the exception type. - exc.reason += ' in {} file at path: {}'.format(name, path) - raise - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - def resource_isdir(self, resource_name): - return self._isdir(self._fn(self.module_path, resource_name)) - - def metadata_isdir(self, name): - return self.egg_info and self._isdir(self._fn(self.egg_info, name)) - - def resource_listdir(self, resource_name): - return self._listdir(self._fn(self.module_path, resource_name)) - - def metadata_listdir(self, name): - if self.egg_info: - return self._listdir(self._fn(self.egg_info, name)) - return [] - - def run_script(self, script_name, namespace): - script = 'scripts/' + script_name - if not self.has_metadata(script): - raise ResolutionError( - "Script {script!r} not found in metadata at {self.egg_info!r}" - .format(**locals()), - ) - script_text = self.get_metadata(script).replace('\r\n', '\n') - script_text = script_text.replace('\r', '\n') - script_filename = self._fn(self.egg_info, script) - namespace['__file__'] = script_filename - if os.path.exists(script_filename): - with open(script_filename) as fid: - source = fid.read() - code = compile(source, script_filename, 'exec') - exec(code, namespace, namespace) - else: - from linecache import cache - cache[script_filename] = ( - len(script_text), 0, script_text.split('\n'), script_filename - ) - script_code = compile(script_text, script_filename, 'exec') - exec(script_code, namespace, namespace) - - def _has(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _isdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _listdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _fn(self, base, resource_name): - self._validate_resource_path(resource_name) - if resource_name: - return os.path.join(base, *resource_name.split('/')) - return base - - @staticmethod - def 
_validate_resource_path(path): - """ - Validate the resource paths according to the docs. - https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access - - >>> warned = getfixture('recwarn') - >>> warnings.simplefilter('always') - >>> vrp = NullProvider._validate_resource_path - >>> vrp('foo/bar.txt') - >>> bool(warned) - False - >>> vrp('../foo/bar.txt') - >>> bool(warned) - True - >>> warned.clear() - >>> vrp('/foo/bar.txt') - >>> bool(warned) - True - >>> vrp('foo/../../bar.txt') - >>> bool(warned) - True - >>> warned.clear() - >>> vrp('foo/f../bar.txt') - >>> bool(warned) - False - - Windows path separators are straight-up disallowed. - >>> vrp(r'\\foo/bar.txt') - Traceback (most recent call last): - ... - ValueError: Use of .. or absolute path in a resource path \ -is not allowed. - - >>> vrp(r'C:\\foo/bar.txt') - Traceback (most recent call last): - ... - ValueError: Use of .. or absolute path in a resource path \ -is not allowed. - - Blank values are allowed - - >>> vrp('') - >>> bool(warned) - False - - Non-string values are not. - - >>> vrp(None) - Traceback (most recent call last): - ... - AttributeError: ... - """ - invalid = ( - os.path.pardir in path.split(posixpath.sep) or - posixpath.isabs(path) or - ntpath.isabs(path) - ) - if not invalid: - return - - msg = "Use of .. or absolute path in a resource path is not allowed." - - # Aggressively disallow Windows absolute paths - if ntpath.isabs(path) and not posixpath.isabs(path): - raise ValueError(msg) - - # for compatibility, warn; in future - # raise ValueError(msg) - warnings.warn( - msg[:-1] + " and will raise exceptions in a future release.", - DeprecationWarning, - stacklevel=4, - ) - - def _get(self, path): - if hasattr(self.loader, 'get_data'): - return self.loader.get_data(path) - raise NotImplementedError( - "Can't perform this operation for loaders without 'get_data()'" - ) - - -register_loader_type(object, NullProvider) - - -def _parents(path): - """ - yield all parents of path including path - """ - last = None - while path != last: - yield path - last = path - path, _ = os.path.split(path) - - -class EggProvider(NullProvider): - """Provider based on a virtual filesystem""" - - def __init__(self, module): - super().__init__(module) - self._setup_prefix() - - def _setup_prefix(self): - # Assume that metadata may be nested inside a "basket" - # of multiple eggs and use module_path instead of .archive. 
- eggs = filter(_is_egg_path, _parents(self.module_path)) - egg = next(eggs, None) - egg and self._set_egg(egg) - - def _set_egg(self, path): - self.egg_name = os.path.basename(path) - self.egg_info = os.path.join(path, 'EGG-INFO') - self.egg_root = path - - -class DefaultProvider(EggProvider): - """Provides access to package resources in the filesystem""" - - def _has(self, path): - return os.path.exists(path) - - def _isdir(self, path): - return os.path.isdir(path) - - def _listdir(self, path): - return os.listdir(path) - - def get_resource_stream(self, manager, resource_name): - return open(self._fn(self.module_path, resource_name), 'rb') - - def _get(self, path): - with open(path, 'rb') as stream: - return stream.read() - - @classmethod - def _register(cls): - loader_names = 'SourceFileLoader', 'SourcelessFileLoader', - for name in loader_names: - loader_cls = getattr(importlib_machinery, name, type(None)) - register_loader_type(loader_cls, cls) - - -DefaultProvider._register() - - -class EmptyProvider(NullProvider): - """Provider that returns nothing for all requests""" - - module_path = None - - _isdir = _has = lambda self, path: False - - def _get(self, path): - return '' - - def _listdir(self, path): - return [] - - def __init__(self): - pass - - -empty_provider = EmptyProvider() - - -class ZipManifests(dict): - """ - zip manifest builder - """ - - @classmethod - def build(cls, path): - """ - Build a dictionary similar to the zipimport directory - caches, except instead of tuples, store ZipInfo objects. - - Use a platform-specific path separator (os.sep) for the path keys - for compatibility with pypy on Windows. - """ - with zipfile.ZipFile(path) as zfile: - items = ( - ( - name.replace('/', os.sep), - zfile.getinfo(name), - ) - for name in zfile.namelist() - ) - return dict(items) - - load = build - - -class MemoizedZipManifests(ZipManifests): - """ - Memoized zipfile manifests. - """ - manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') - - def load(self, path): - """ - Load a manifest at path or return a suitable manifest already loaded. - """ - path = os.path.normpath(path) - mtime = os.stat(path).st_mtime - - if path not in self or self[path].mtime != mtime: - manifest = self.build(path) - self[path] = self.manifest_mod(manifest, mtime) - - return self[path].manifest - - -class ZipProvider(EggProvider): - """Resource support for zips and eggs""" - - eagers = None - _zip_manifests = MemoizedZipManifests() - - def __init__(self, module): - super().__init__(module) - self.zip_pre = self.loader.archive + os.sep - - def _zipinfo_name(self, fspath): - # Convert a virtual filename (full path to file) into a zipfile subpath - # usable with the zipimport directory cache for our target archive - fspath = fspath.rstrip(os.sep) - if fspath == self.loader.archive: - return '' - if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre):] - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.zip_pre) - ) - - def _parts(self, zip_path): - # Convert a zipfile subpath into an egg-relative path part list. 
- # pseudo-fs path - fspath = self.zip_pre + zip_path - if fspath.startswith(self.egg_root + os.sep): - return fspath[len(self.egg_root) + 1:].split(os.sep) - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.egg_root) - ) - - @property - def zipinfo(self): - return self._zip_manifests.load(self.loader.archive) - - def get_resource_filename(self, manager, resource_name): - if not self.egg_name: - raise NotImplementedError( - "resource_filename() only supported for .egg, not .zip" - ) - # no need to lock for extraction, since we use temp names - zip_path = self._resource_to_zip(resource_name) - eagers = self._get_eager_resources() - if '/'.join(self._parts(zip_path)) in eagers: - for name in eagers: - self._extract_resource(manager, self._eager_to_zip(name)) - return self._extract_resource(manager, zip_path) - - @staticmethod - def _get_date_and_size(zip_stat): - size = zip_stat.file_size - # ymdhms+wday, yday, dst - date_time = zip_stat.date_time + (0, 0, -1) - # 1980 offset already done - timestamp = time.mktime(date_time) - return timestamp, size - - # FIXME: 'ZipProvider._extract_resource' is too complex (12) - def _extract_resource(self, manager, zip_path): # noqa: C901 - - if zip_path in self._index(): - for name in self._index()[zip_path]: - last = self._extract_resource( - manager, os.path.join(zip_path, name) - ) - # return the extracted directory name - return os.path.dirname(last) - - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - - if not WRITE_SUPPORT: - raise IOError('"os.rename" and "os.unlink" are not supported ' - 'on this platform') - try: - - real_path = manager.get_cache_path( - self.egg_name, self._parts(zip_path) - ) - - if self._is_current(real_path, zip_path): - return real_path - - outf, tmpnam = _mkstemp( - ".$extract", - dir=os.path.dirname(real_path), - ) - os.write(outf, self.loader.get_data(zip_path)) - os.close(outf) - utime(tmpnam, (timestamp, timestamp)) - manager.postprocess(tmpnam, real_path) - - try: - rename(tmpnam, real_path) - - except os.error: - if os.path.isfile(real_path): - if self._is_current(real_path, zip_path): - # the file became current since it was checked above, - # so proceed. 
- return real_path - # Windows, del old file and retry - elif os.name == 'nt': - unlink(real_path) - rename(tmpnam, real_path) - return real_path - raise - - except os.error: - # report a user-friendly error - manager.extraction_error() - - return real_path - - def _is_current(self, file_path, zip_path): - """ - Return True if the file_path is current for this zip_path - """ - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - if not os.path.isfile(file_path): - return False - stat = os.stat(file_path) - if stat.st_size != size or stat.st_mtime != timestamp: - return False - # check that the contents match - zip_contents = self.loader.get_data(zip_path) - with open(file_path, 'rb') as f: - file_contents = f.read() - return zip_contents == file_contents - - def _get_eager_resources(self): - if self.eagers is None: - eagers = [] - for name in ('native_libs.txt', 'eager_resources.txt'): - if self.has_metadata(name): - eagers.extend(self.get_metadata_lines(name)) - self.eagers = eagers - return self.eagers - - def _index(self): - try: - return self._dirindex - except AttributeError: - ind = {} - for path in self.zipinfo: - parts = path.split(os.sep) - while parts: - parent = os.sep.join(parts[:-1]) - if parent in ind: - ind[parent].append(parts[-1]) - break - else: - ind[parent] = [parts.pop()] - self._dirindex = ind - return ind - - def _has(self, fspath): - zip_path = self._zipinfo_name(fspath) - return zip_path in self.zipinfo or zip_path in self._index() - - def _isdir(self, fspath): - return self._zipinfo_name(fspath) in self._index() - - def _listdir(self, fspath): - return list(self._index().get(self._zipinfo_name(fspath), ())) - - def _eager_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.egg_root, resource_name)) - - def _resource_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.module_path, resource_name)) - - -register_loader_type(zipimport.zipimporter, ZipProvider) - - -class FileMetadata(EmptyProvider): - """Metadata handler for standalone PKG-INFO files - - Usage:: - - metadata = FileMetadata("/path/to/PKG-INFO") - - This provider rejects all data and metadata requests except for PKG-INFO, - which is treated as existing, and will be the contents of the file at - the provided location. 
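A hedged sketch of the behaviour just described; the path is hypothetical.

# Illustrative sketch only; not part of the original patch hunk.
from pkg_resources import FileMetadata

md = FileMetadata('/tmp/demo/PKG-INFO')   # hypothetical path
md.has_metadata('PKG-INFO')               # True when the file exists
md.has_metadata('entry_points.txt')       # False: only PKG-INFO is served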
- """ - - def __init__(self, path): - self.path = path - - def _get_metadata_path(self, name): - return self.path - - def has_metadata(self, name): - return name == 'PKG-INFO' and os.path.isfile(self.path) - - def get_metadata(self, name): - if name != 'PKG-INFO': - raise KeyError("No metadata except PKG-INFO is available") - - with io.open(self.path, encoding='utf-8', errors="replace") as f: - metadata = f.read() - self._warn_on_replacement(metadata) - return metadata - - def _warn_on_replacement(self, metadata): - replacement_char = '�' - if replacement_char in metadata: - tmpl = "{self.path} could not be properly decoded in UTF-8" - msg = tmpl.format(**locals()) - warnings.warn(msg) - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - -class PathMetadata(DefaultProvider): - """Metadata provider for egg directories - - Usage:: - - # Development eggs: - - egg_info = "/path/to/PackageName.egg-info" - base_dir = os.path.dirname(egg_info) - metadata = PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - dist = Distribution(basedir, project_name=dist_name, metadata=metadata) - - # Unpacked egg directories: - - egg_path = "/path/to/PackageName-ver-pyver-etc.egg" - metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) - dist = Distribution.from_filename(egg_path, metadata=metadata) - """ - - def __init__(self, path, egg_info): - self.module_path = path - self.egg_info = egg_info - - -class EggMetadata(ZipProvider): - """Metadata provider for .egg files""" - - def __init__(self, importer): - """Create a metadata provider from a zipimporter""" - - self.zip_pre = importer.archive + os.sep - self.loader = importer - if importer.prefix: - self.module_path = os.path.join(importer.archive, importer.prefix) - else: - self.module_path = importer.archive - self._setup_prefix() - - -_declare_state('dict', _distribution_finders={}) - - -def register_finder(importer_type, distribution_finder): - """Register `distribution_finder` to find distributions in sys.path items - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `distribution_finder` is a callable that, passed a path - item and the importer instance, yields ``Distribution`` instances found on - that path item. See ``pkg_resources.find_on_path`` for an example.""" - _distribution_finders[importer_type] = distribution_finder - - -def find_distributions(path_item, only=False): - """Yield distributions accessible via `path_item`""" - importer = get_importer(path_item) - finder = _find_adapter(_distribution_finders, importer) - return finder(importer, path_item, only) - - -def find_eggs_in_zip(importer, path_item, only=False): - """ - Find eggs in zip files; possibly multiple nested eggs. 
- """ - if importer.archive.endswith('.whl'): - # wheels are not supported with this finder - # they don't have PKG-INFO metadata, and won't ever contain eggs - return - metadata = EggMetadata(importer) - if metadata.has_metadata('PKG-INFO'): - yield Distribution.from_filename(path_item, metadata=metadata) - if only: - # don't yield nested distros - return - for subitem in metadata.resource_listdir(''): - if _is_egg_path(subitem): - subpath = os.path.join(path_item, subitem) - dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) - for dist in dists: - yield dist - elif subitem.lower().endswith(('.dist-info', '.egg-info')): - subpath = os.path.join(path_item, subitem) - submeta = EggMetadata(zipimport.zipimporter(subpath)) - submeta.egg_info = subpath - yield Distribution.from_location(path_item, subitem, submeta) - - -register_finder(zipimport.zipimporter, find_eggs_in_zip) - - -def find_nothing(importer, path_item, only=False): - return () - - -register_finder(object, find_nothing) - - -def _by_version_descending(names): - """ - Given a list of filenames, return them in descending order - by version number. - - >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' - >>> _by_version_descending(names) - ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'bar', 'foo'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] - """ - def try_parse(name): - """ - Attempt to parse as a version or return a null version. - """ - try: - return packaging.version.Version(name) - except Exception: - return packaging.version.Version('0') - - def _by_version(name): - """ - Parse each component of the filename - """ - name, ext = os.path.splitext(name) - parts = itertools.chain(name.split('-'), [ext]) - return [try_parse(part) for part in parts] - - return sorted(names, key=_by_version, reverse=True) - - -def find_on_path(importer, path_item, only=False): - """Yield distributions accessible on a sys.path directory""" - path_item = _normalize_cached(path_item) - - if _is_unpacked_egg(path_item): - yield Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item, 'EGG-INFO') - ) - ) - return - - entries = ( - os.path.join(path_item, child) - for child in safe_listdir(path_item) - ) - - # for performance, before sorting by version, - # screen entries for only those that will yield - # distributions - filtered = ( - entry - for entry in entries - if dist_factory(path_item, entry, only) - ) - - # scan for .egg and .egg-info in directory - path_item_entries = _by_version_descending(filtered) - for entry in path_item_entries: - fullpath = os.path.join(path_item, entry) - factory = dist_factory(path_item, entry, only) - for dist in factory(fullpath): - yield dist - - -def dist_factory(path_item, entry, only): - """Return a dist_factory for the given entry.""" - lower = entry.lower() - is_egg_info = lower.endswith('.egg-info') - is_dist_info = ( - lower.endswith('.dist-info') and - os.path.isdir(os.path.join(path_item, entry)) - ) - is_meta = is_egg_info or is_dist_info - return ( - distributions_from_metadata - if is_meta else - find_distributions - if not only and _is_egg_path(entry) else - resolve_egg_link - if not only and lower.endswith('.egg-link') else - NoDists() - ) - - -class NoDists: - """ - 
>>> bool(NoDists()) - False - - >>> list(NoDists()('anything')) - [] - """ - def __bool__(self): - return False - - def __call__(self, fullpath): - return iter(()) - - -def safe_listdir(path): - """ - Attempt to list contents of path, but suppress some exceptions. - """ - try: - return os.listdir(path) - except (PermissionError, NotADirectoryError): - pass - except OSError as e: - # Ignore the directory if does not exist, not a directory or - # permission denied - if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT): - raise - return () - - -def distributions_from_metadata(path): - root = os.path.dirname(path) - if os.path.isdir(path): - if len(os.listdir(path)) == 0: - # empty metadata dir; skip - return - metadata = PathMetadata(root, path) - else: - metadata = FileMetadata(path) - entry = os.path.basename(path) - yield Distribution.from_location( - root, entry, metadata, precedence=DEVELOP_DIST, - ) - - -def non_empty_lines(path): - """ - Yield non-empty lines from file at path - """ - with open(path) as f: - for line in f: - line = line.strip() - if line: - yield line - - -def resolve_egg_link(path): - """ - Given a path to an .egg-link, resolve distributions - present in the referenced path. - """ - referenced_paths = non_empty_lines(path) - resolved_paths = ( - os.path.join(os.path.dirname(path), ref) - for ref in referenced_paths - ) - dist_groups = map(find_distributions, resolved_paths) - return next(dist_groups, ()) - - -register_finder(pkgutil.ImpImporter, find_on_path) - -if hasattr(importlib_machinery, 'FileFinder'): - register_finder(importlib_machinery.FileFinder, find_on_path) - -_declare_state('dict', _namespace_handlers={}) -_declare_state('dict', _namespace_packages={}) - - -def register_namespace_handler(importer_type, namespace_handler): - """Register `namespace_handler` to declare namespace packages - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `namespace_handler` is a callable like this:: - - def namespace_handler(importer, path_entry, moduleName, module): - # return a path_entry to use for child packages - - Namespace handlers are only called if the importer object has already - agreed that it can handle the relevant path item, and they should only - return a subpath if the module __path__ does not already contain an - equivalent subpath. For an example namespace handler, see - ``pkg_resources.file_ns_handler``. 
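A hedged sketch of the handler contract just described; the importer type and handler are hypothetical.

# Illustrative sketch only; not part of the original patch hunk.
import pkg_resources

class DummyImporter:          # hypothetical PEP 302 importer type
    pass

def dummy_ns_handler(importer, path_entry, moduleName, module):
    return None               # decline to contribute a subpath

pkg_resources.register_namespace_handler(DummyImporter, dummy_ns_handler)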
- """ - _namespace_handlers[importer_type] = namespace_handler - - -def _handle_ns(packageName, path_item): - """Ensure that named package includes a subpath of path_item (if needed)""" - - importer = get_importer(path_item) - if importer is None: - return None - - # use find_spec (PEP 451) and fall-back to find_module (PEP 302) - try: - spec = importer.find_spec(packageName) - except AttributeError: - # capture warnings due to #1111 - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - loader = importer.find_module(packageName) - else: - loader = spec.loader if spec else None - - if loader is None: - return None - module = sys.modules.get(packageName) - if module is None: - module = sys.modules[packageName] = types.ModuleType(packageName) - module.__path__ = [] - _set_parent_ns(packageName) - elif not hasattr(module, '__path__'): - raise TypeError("Not a package:", packageName) - handler = _find_adapter(_namespace_handlers, importer) - subpath = handler(importer, path_item, packageName, module) - if subpath is not None: - path = module.__path__ - path.append(subpath) - importlib.import_module(packageName) - _rebuild_mod_path(path, packageName, module) - return subpath - - -def _rebuild_mod_path(orig_path, package_name, module): - """ - Rebuild module.__path__ ensuring that all entries are ordered - corresponding to their sys.path order - """ - sys_path = [_normalize_cached(p) for p in sys.path] - - def safe_sys_path_index(entry): - """ - Workaround for #520 and #513. - """ - try: - return sys_path.index(entry) - except ValueError: - return float('inf') - - def position_in_sys_path(path): - """ - Return the ordinal of the path based on its position in sys.path - """ - path_parts = path.split(os.sep) - module_parts = package_name.count('.') + 1 - parts = path_parts[:-module_parts] - return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) - - new_path = sorted(orig_path, key=position_in_sys_path) - new_path = [_normalize_cached(p) for p in new_path] - - if isinstance(module.__path__, list): - module.__path__[:] = new_path - else: - module.__path__ = new_path - - -def declare_namespace(packageName): - """Declare that package 'packageName' is a namespace package""" - - _imp.acquire_lock() - try: - if packageName in _namespace_packages: - return - - path = sys.path - parent, _, _ = packageName.rpartition('.') - - if parent: - declare_namespace(parent) - if parent not in _namespace_packages: - __import__(parent) - try: - path = sys.modules[parent].__path__ - except AttributeError as e: - raise TypeError("Not a package:", parent) from e - - # Track what packages are namespaces, so when new path items are added, - # they can be updated - _namespace_packages.setdefault(parent or None, []).append(packageName) - _namespace_packages.setdefault(packageName, []) - - for path_item in path: - # Ensure all the parent's path items are reflected in the child, - # if they apply - _handle_ns(packageName, path_item) - - finally: - _imp.release_lock() - - -def fixup_namespace_packages(path_item, parent=None): - """Ensure that previously-declared namespace packages include path_item""" - _imp.acquire_lock() - try: - for package in _namespace_packages.get(parent, ()): - subpath = _handle_ns(package, path_item) - if subpath: - fixup_namespace_packages(subpath, package) - finally: - _imp.release_lock() - - -def file_ns_handler(importer, path_item, packageName, module): - """Compute an ns-package subpath for a filesystem or zipfile importer""" - - subpath = os.path.join(path_item, 
packageName.split('.')[-1]) - normalized = _normalize_cached(subpath) - for item in module.__path__: - if _normalize_cached(item) == normalized: - break - else: - # Only return the path if it's not already there - return subpath - - -register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) -register_namespace_handler(zipimport.zipimporter, file_ns_handler) - -if hasattr(importlib_machinery, 'FileFinder'): - register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) - - -def null_ns_handler(importer, path_item, packageName, module): - return None - - -register_namespace_handler(object, null_ns_handler) - - -def normalize_path(filename): - """Normalize a file/dir name for comparison purposes""" - return os.path.normcase(os.path.realpath(os.path.normpath( - _cygwin_patch(filename)))) - - -def _cygwin_patch(filename): # pragma: nocover - """ - Contrary to POSIX 2008, on Cygwin, getcwd (3) contains - symlink components. Using - os.path.abspath() works around this limitation. A fix in os.getcwd() - would probably better, in Cygwin even more so, except - that this seems to be by design... - """ - return os.path.abspath(filename) if sys.platform == 'cygwin' else filename - - -def _normalize_cached(filename, _cache={}): - try: - return _cache[filename] - except KeyError: - _cache[filename] = result = normalize_path(filename) - return result - - -def _is_egg_path(path): - """ - Determine if given path appears to be an egg. - """ - return _is_zip_egg(path) or _is_unpacked_egg(path) - - -def _is_zip_egg(path): - return ( - path.lower().endswith('.egg') and - os.path.isfile(path) and - zipfile.is_zipfile(path) - ) - - -def _is_unpacked_egg(path): - """ - Determine if given path appears to be an unpacked egg. - """ - return ( - path.lower().endswith('.egg') and - os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) - ) - - -def _set_parent_ns(packageName): - parts = packageName.split('.') - name = parts.pop() - if parts: - parent = '.'.join(parts) - setattr(sys.modules[parent], name, sys.modules[packageName]) - - -MODULE = re.compile(r"\w+(\.\w+)*$").match -EGG_NAME = re.compile( - r""" - (?P[^-]+) ( - -(?P[^-]+) ( - -py(?P[^-]+) ( - -(?P.+) - )? - )? - )? - """, - re.VERBOSE | re.IGNORECASE, -).match - - -class EntryPoint: - """Object representing an advertised importable object""" - - def __init__(self, name, module_name, attrs=(), extras=(), dist=None): - if not MODULE(module_name): - raise ValueError("Invalid module name", module_name) - self.name = name - self.module_name = module_name - self.attrs = tuple(attrs) - self.extras = tuple(extras) - self.dist = dist - - def __str__(self): - s = "%s = %s" % (self.name, self.module_name) - if self.attrs: - s += ':' + '.'.join(self.attrs) - if self.extras: - s += ' [%s]' % ','.join(self.extras) - return s - - def __repr__(self): - return "EntryPoint.parse(%r)" % str(self) - - def load(self, require=True, *args, **kwargs): - """ - Require packages for this EntryPoint, then resolve it. - """ - if not require or args or kwargs: - warnings.warn( - "Parameters to load are deprecated. Call .resolve and " - ".require separately.", - PkgResourcesDeprecationWarning, - stacklevel=2, - ) - if require: - self.require(*args, **kwargs) - return self.resolve() - - def resolve(self): - """ - Resolve the entry point from its module and attrs. 
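Together, ``parse()`` (further down) and ``resolve()`` round-trip the ``name = module:attrs [extras]`` syntax; a minimal sketch using only the standard library:

# Illustrative sketch only; not part of the original patch hunk.
from pkg_resources import EntryPoint

ep = EntryPoint.parse('main = json:dumps')   # name 'main', module 'json', attr 'dumps'
func = ep.resolve()                          # imports json, then walks the attrs
assert func({'a': 1}) == '{"a": 1}'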
- """ - module = __import__(self.module_name, fromlist=['__name__'], level=0) - try: - return functools.reduce(getattr, self.attrs, module) - except AttributeError as exc: - raise ImportError(str(exc)) from exc - - def require(self, env=None, installer=None): - if self.extras and not self.dist: - raise UnknownExtra("Can't require() without a distribution", self) - - # Get the requirements for this entry point with all its extras and - # then resolve them. We have to pass `extras` along when resolving so - # that the working set knows what extras we want. Otherwise, for - # dist-info distributions, the working set will assume that the - # requirements for that extra are purely optional and skip over them. - reqs = self.dist.requires(self.extras) - items = working_set.resolve(reqs, env, installer, extras=self.extras) - list(map(working_set.add, items)) - - pattern = re.compile( - r'\s*' - r'(?P.+?)\s*' - r'=\s*' - r'(?P[\w.]+)\s*' - r'(:\s*(?P[\w.]+))?\s*' - r'(?P\[.*\])?\s*$' - ) - - @classmethod - def parse(cls, src, dist=None): - """Parse a single entry point from string `src` - - Entry point syntax follows the form:: - - name = some.module:some.attr [extra1, extra2] - - The entry name and module name are required, but the ``:attrs`` and - ``[extras]`` parts are optional - """ - m = cls.pattern.match(src) - if not m: - msg = "EntryPoint must be in 'name=module:attrs [extras]' format" - raise ValueError(msg, src) - res = m.groupdict() - extras = cls._parse_extras(res['extras']) - attrs = res['attr'].split('.') if res['attr'] else () - return cls(res['name'], res['module'], attrs, extras, dist) - - @classmethod - def _parse_extras(cls, extras_spec): - if not extras_spec: - return () - req = Requirement.parse('x' + extras_spec) - if req.specs: - raise ValueError() - return req.extras - - @classmethod - def parse_group(cls, group, lines, dist=None): - """Parse an entry point group""" - if not MODULE(group): - raise ValueError("Invalid group name", group) - this = {} - for line in yield_lines(lines): - ep = cls.parse(line, dist) - if ep.name in this: - raise ValueError("Duplicate entry point", group, ep.name) - this[ep.name] = ep - return this - - @classmethod - def parse_map(cls, data, dist=None): - """Parse a map of entry point groups""" - if isinstance(data, dict): - data = data.items() - else: - data = split_sections(data) - maps = {} - for group, lines in data: - if group is None: - if not lines: - continue - raise ValueError("Entry points must be listed in groups") - group = group.strip() - if group in maps: - raise ValueError("Duplicate group name", group) - maps[group] = cls.parse_group(group, lines, dist) - return maps - - -def _version_from_file(lines): - """ - Given an iterable of lines from a Metadata file, return - the value of the Version field, if present, or None otherwise. 
- """ - def is_version_line(line): - return line.lower().startswith('version:') - version_lines = filter(is_version_line, lines) - line = next(iter(version_lines), '') - _, _, value = line.partition(':') - return safe_version(value.strip()) or None - - -class Distribution: - """Wrap an actual or potential sys.path entry w/metadata""" - PKG_INFO = 'PKG-INFO' - - def __init__( - self, location=None, metadata=None, project_name=None, - version=None, py_version=PY_MAJOR, platform=None, - precedence=EGG_DIST): - self.project_name = safe_name(project_name or 'Unknown') - if version is not None: - self._version = safe_version(version) - self.py_version = py_version - self.platform = platform - self.location = location - self.precedence = precedence - self._provider = metadata or empty_provider - - @classmethod - def from_location(cls, location, basename, metadata=None, **kw): - project_name, version, py_version, platform = [None] * 4 - basename, ext = os.path.splitext(basename) - if ext.lower() in _distributionImpl: - cls = _distributionImpl[ext.lower()] - - match = EGG_NAME(basename) - if match: - project_name, version, py_version, platform = match.group( - 'name', 'ver', 'pyver', 'plat' - ) - return cls( - location, metadata, project_name=project_name, version=version, - py_version=py_version, platform=platform, **kw - )._reload_version() - - def _reload_version(self): - return self - - @property - def hashcmp(self): - return ( - self.parsed_version, - self.precedence, - self.key, - self.location, - self.py_version or '', - self.platform or '', - ) - - def __hash__(self): - return hash(self.hashcmp) - - def __lt__(self, other): - return self.hashcmp < other.hashcmp - - def __le__(self, other): - return self.hashcmp <= other.hashcmp - - def __gt__(self, other): - return self.hashcmp > other.hashcmp - - def __ge__(self, other): - return self.hashcmp >= other.hashcmp - - def __eq__(self, other): - if not isinstance(other, self.__class__): - # It's not a Distribution, so they are not equal - return False - return self.hashcmp == other.hashcmp - - def __ne__(self, other): - return not self == other - - # These properties have to be lazy so that we don't have to load any - # metadata until/unless it's actually needed. (i.e., some distributions - # may not know their name or version without loading PKG-INFO) - - @property - def key(self): - try: - return self._key - except AttributeError: - self._key = key = self.project_name.lower() - return key - - @property - def parsed_version(self): - if not hasattr(self, "_parsed_version"): - self._parsed_version = parse_version(self.version) - - return self._parsed_version - - def _warn_legacy_version(self): - LV = packaging.version.LegacyVersion - is_legacy = isinstance(self._parsed_version, LV) - if not is_legacy: - return - - # While an empty version is technically a legacy version and - # is not a valid PEP 440 version, it's also unlikely to - # actually come from someone and instead it is more likely that - # it comes from setuptools attempting to parse a filename and - # including it in the list. So for that we'll gate this warning - # on if the version is anything at all or not. - if not self.version: - return - - tmpl = textwrap.dedent(""" - '{project_name} ({version})' is being parsed as a legacy, - non PEP 440, - version. You may find odd behavior and sort order. - In particular it will be sorted as less than 0.0. It - is recommended to migrate to PEP 440 compatible - versions. 
- """).strip().replace('\n', ' ') - - warnings.warn(tmpl.format(**vars(self)), PEP440Warning) - - @property - def version(self): - try: - return self._version - except AttributeError as e: - version = self._get_version() - if version is None: - path = self._get_metadata_path_for_display(self.PKG_INFO) - msg = ( - "Missing 'Version:' header and/or {} file at path: {}" - ).format(self.PKG_INFO, path) - raise ValueError(msg, self) from e - - return version - - @property - def _dep_map(self): - """ - A map of extra to its list of (direct) requirements - for this distribution, including the null extra. - """ - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._filter_extras(self._build_dep_map()) - return self.__dep_map - - @staticmethod - def _filter_extras(dm): - """ - Given a mapping of extras to dependencies, strip off - environment markers and filter out any dependencies - not matching the markers. - """ - for extra in list(filter(None, dm)): - new_extra = extra - reqs = dm.pop(extra) - new_extra, _, marker = extra.partition(':') - fails_marker = marker and ( - invalid_marker(marker) - or not evaluate_marker(marker) - ) - if fails_marker: - reqs = [] - new_extra = safe_extra(new_extra) or None - - dm.setdefault(new_extra, []).extend(reqs) - return dm - - def _build_dep_map(self): - dm = {} - for name in 'requires.txt', 'depends.txt': - for extra, reqs in split_sections(self._get_metadata(name)): - dm.setdefault(extra, []).extend(parse_requirements(reqs)) - return dm - - def requires(self, extras=()): - """List of Requirements needed for this distro if `extras` are used""" - dm = self._dep_map - deps = [] - deps.extend(dm.get(None, ())) - for ext in extras: - try: - deps.extend(dm[safe_extra(ext)]) - except KeyError as e: - raise UnknownExtra( - "%s has no such extra feature %r" % (self, ext) - ) from e - return deps - - def _get_metadata_path_for_display(self, name): - """ - Return the path to the given metadata file, if available. - """ - try: - # We need to access _get_metadata_path() on the provider object - # directly rather than through this class's __getattr__() - # since _get_metadata_path() is marked private. - path = self._provider._get_metadata_path(name) - - # Handle exceptions e.g. in case the distribution's metadata - # provider doesn't support _get_metadata_path(). 
- except Exception: - return '[could not detect]' - - return path - - def _get_metadata(self, name): - if self.has_metadata(name): - for line in self.get_metadata_lines(name): - yield line - - def _get_version(self): - lines = self._get_metadata(self.PKG_INFO) - version = _version_from_file(lines) - - return version - - def activate(self, path=None, replace=False): - """Ensure distribution is importable on `path` (default=sys.path)""" - if path is None: - path = sys.path - self.insert_on(path, replace=replace) - if path is sys.path: - fixup_namespace_packages(self.location) - for pkg in self._get_metadata('namespace_packages.txt'): - if pkg in sys.modules: - declare_namespace(pkg) - - def egg_name(self): - """Return what this distribution's standard .egg filename should be""" - filename = "%s-%s-py%s" % ( - to_filename(self.project_name), to_filename(self.version), - self.py_version or PY_MAJOR - ) - - if self.platform: - filename += '-' + self.platform - return filename - - def __repr__(self): - if self.location: - return "%s (%s)" % (self, self.location) - else: - return str(self) - - def __str__(self): - try: - version = getattr(self, 'version', None) - except ValueError: - version = None - version = version or "[unknown version]" - return "%s %s" % (self.project_name, version) - - def __getattr__(self, attr): - """Delegate all unrecognized public attributes to .metadata provider""" - if attr.startswith('_'): - raise AttributeError(attr) - return getattr(self._provider, attr) - - def __dir__(self): - return list( - set(super(Distribution, self).__dir__()) - | set( - attr for attr in self._provider.__dir__() - if not attr.startswith('_') - ) - ) - - @classmethod - def from_filename(cls, filename, metadata=None, **kw): - return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, - **kw - ) - - def as_requirement(self): - """Return a ``Requirement`` that matches this distribution exactly""" - if isinstance(self.parsed_version, packaging.version.Version): - spec = "%s==%s" % (self.project_name, self.parsed_version) - else: - spec = "%s===%s" % (self.project_name, self.parsed_version) - - return Requirement.parse(spec) - - def load_entry_point(self, group, name): - """Return the `name` entry point of `group` or raise ImportError""" - ep = self.get_entry_info(group, name) - if ep is None: - raise ImportError("Entry point %r not found" % ((group, name),)) - return ep.load() - - def get_entry_map(self, group=None): - """Return the entry point map for `group`, or the full entry map""" - try: - ep_map = self._ep_map - except AttributeError: - ep_map = self._ep_map = EntryPoint.parse_map( - self._get_metadata('entry_points.txt'), self - ) - if group is not None: - return ep_map.get(group, {}) - return ep_map - - def get_entry_info(self, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return self.get_entry_map(group).get(name) - - # FIXME: 'Distribution.insert_on' is too complex (13) - def insert_on(self, path, loc=None, replace=False): # noqa: C901 - """Ensure self.location is on path - - If replace=False (default): - - If location is already in path anywhere, do nothing. - - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent. - - Else: add to the end of path. - If replace=True: - - If location is already on path anywhere (not eggs) - or higher priority than its parent (eggs) - do nothing. 
- - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent, - removing any lower-priority entries. - - Else: add it to the front of path. - """ - - loc = loc or self.location - if not loc: - return - - nloc = _normalize_cached(loc) - bdir = os.path.dirname(nloc) - npath = [(p and _normalize_cached(p) or p) for p in path] - - for p, item in enumerate(npath): - if item == nloc: - if replace: - break - else: - # don't modify path (even removing duplicates) if - # found and not replace - return - elif item == bdir and self.precedence == EGG_DIST: - # if it's an .egg, give it precedence over its directory - # UNLESS it's already been added to sys.path and replace=False - if (not replace) and nloc in npath[p:]: - return - if path is sys.path: - self.check_version_conflict() - path.insert(p, loc) - npath.insert(p, nloc) - break - else: - if path is sys.path: - self.check_version_conflict() - if replace: - path.insert(0, loc) - else: - path.append(loc) - return - - # p is the spot where we found or inserted loc; now remove duplicates - while True: - try: - np = npath.index(nloc, p + 1) - except ValueError: - break - else: - del npath[np], path[np] - # ha! - p = np - - return - - def check_version_conflict(self): - if self.key == 'setuptools': - # ignore the inevitable setuptools self-conflicts :( - return - - nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) - loc = normalize_path(self.location) - for modname in self._get_metadata('top_level.txt'): - if (modname not in sys.modules or modname in nsp - or modname in _namespace_packages): - continue - if modname in ('pkg_resources', 'setuptools', 'site'): - continue - fn = getattr(sys.modules[modname], '__file__', None) - if fn and (normalize_path(fn).startswith(loc) or - fn.startswith(self.location)): - continue - issue_warning( - "Module %s was already imported from %s, but %s is being added" - " to sys.path" % (modname, fn, self.location), - ) - - def has_version(self): - try: - self.version - except ValueError: - issue_warning("Unbuilt egg for " + repr(self)) - return False - return True - - def clone(self, **kw): - """Copy this distribution, substituting in any changed keyword args""" - names = 'project_name version py_version platform location precedence' - for attr in names.split(): - kw.setdefault(attr, getattr(self, attr, None)) - kw.setdefault('metadata', self._provider) - return self.__class__(**kw) - - @property - def extras(self): - return [dep for dep in self._dep_map if dep] - - -class EggInfoDistribution(Distribution): - def _reload_version(self): - """ - Packages installed by distutils (e.g. numpy or scipy), - which uses an old safe_version, and so - their version numbers can get mangled when - converted to filenames (e.g., 1.11.0.dev0+2329eae to - 1.11.0.dev0_2329eae). These distributions will not be - parsed properly - downstream by Distribution and safe_version, so - take an extra step and try to get the version number from - the metadata file itself instead of the filename. - """ - md_version = self._get_version() - if md_version: - self._version = md_version - return self - - -class DistInfoDistribution(Distribution): - """ - Wrap an actual or potential sys.path entry - w/metadata, .dist-info style. 
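# ---- Editorial aside (illustration only, not part of the deleted file) ----
# A minimal sketch of what ".dist-info style" metadata means for
# DistInfoDistribution: the METADATA file is an RFC 822-style document, so
# the stdlib email parser can read it, mirroring _parsed_pkg_info below.
# The metadata text here is a made-up example, not from a real distribution.
import email.parser

raw = (
    "Metadata-Version: 2.1\n"
    "Name: example-dist\n"
    "Version: 1.0\n"
    "Requires-Dist: requests (>=2.0)\n"
    "Provides-Extra: ssl\n"
)
pkg_info = email.parser.Parser().parsestr(raw)
assert pkg_info["Version"] == "1.0"
assert pkg_info.get_all("Requires-Dist") == ["requests (>=2.0)"]
# ----------------------------------------------------------------------------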
- """ - PKG_INFO = 'METADATA' - EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") - - @property - def _parsed_pkg_info(self): - """Parse and cache metadata""" - try: - return self._pkg_info - except AttributeError: - metadata = self.get_metadata(self.PKG_INFO) - self._pkg_info = email.parser.Parser().parsestr(metadata) - return self._pkg_info - - @property - def _dep_map(self): - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._compute_dependencies() - return self.__dep_map - - def _compute_dependencies(self): - """Recompute this distribution's dependencies.""" - dm = self.__dep_map = {None: []} - - reqs = [] - # Including any condition expressions - for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: - reqs.extend(parse_requirements(req)) - - def reqs_for_extra(extra): - for req in reqs: - if not req.marker or req.marker.evaluate({'extra': extra}): - yield req - - common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None))) - dm[None].extend(common) - - for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: - s_extra = safe_extra(extra.strip()) - dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common] - - return dm - - -_distributionImpl = { - '.egg': Distribution, - '.egg-info': EggInfoDistribution, - '.dist-info': DistInfoDistribution, -} - - -def issue_warning(*args, **kw): - level = 1 - g = globals() - try: - # find the first stack frame that is *not* code in - # the pkg_resources module, to use for the warning - while sys._getframe(level).f_globals is g: - level += 1 - except ValueError: - pass - warnings.warn(stacklevel=level + 1, *args, **kw) - - -def parse_requirements(strs): - """ - Yield ``Requirement`` objects for each specification in `strs`. - - `strs` must be a string, or a (possibly-nested) iterable thereof. - """ - return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs)))) - - -class RequirementParseError(packaging.requirements.InvalidRequirement): - "Compatibility wrapper for InvalidRequirement" - - -class Requirement(packaging.requirements.Requirement): - def __init__(self, requirement_string): - """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - super(Requirement, self).__init__(requirement_string) - self.unsafe_name = self.name - project_name = safe_name(self.name) - self.project_name, self.key = project_name, project_name.lower() - self.specs = [ - (spec.operator, spec.version) for spec in self.specifier] - self.extras = tuple(map(safe_extra, self.extras)) - self.hashCmp = ( - self.key, - self.url, - self.specifier, - frozenset(self.extras), - str(self.marker) if self.marker else None, - ) - self.__hash = hash(self.hashCmp) - - def __eq__(self, other): - return ( - isinstance(other, Requirement) and - self.hashCmp == other.hashCmp - ) - - def __ne__(self, other): - return not self == other - - def __contains__(self, item): - if isinstance(item, Distribution): - if item.key != self.key: - return False - - item = item.version - - # Allow prereleases always in order to match the previous behavior of - # this method. In the future this should be smarter and follow PEP 440 - # more accurately. - return self.specifier.contains(item, prereleases=True) - - def __hash__(self): - return self.__hash - - def __repr__(self): - return "Requirement.parse(%r)" % str(self) - - @staticmethod - def parse(s): - req, = parse_requirements(s) - return req - - -def _always_object(classes): - """ - Ensure object appears in the mro even - for old-style classes. 
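# ---- Editorial aside (illustration only, not part of the deleted file) ----
# How the Requirement subclass defined above is typically exercised;
# "requests" is only an example project name. Note that __contains__
# delegates version strings to the PEP 440 specifier with prereleases
# allowed, as the code above documents.
import pkg_resources

req = pkg_resources.Requirement.parse("requests>=2.0")
assert (req.project_name, req.key) == ("requests", "requests")
assert req.specs == [(">=", "2.0")]
assert "2.31.0" in req      # version string checked via req.specifier
assert "1.0" not in req
# ----------------------------------------------------------------------------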
- """ - if object not in classes: - return classes + (object,) - return classes - - -def _find_adapter(registry, ob): - """Return an adapter factory for `ob` from `registry`""" - types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) - for t in types: - if t in registry: - return registry[t] - - -def ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - os.makedirs(dirname, exist_ok=True) - - -def _bypass_ensure_directory(path): - """Sandbox-bypassing version of ensure_directory()""" - if not WRITE_SUPPORT: - raise IOError('"os.mkdir" not supported on this platform.') - dirname, filename = split(path) - if dirname and filename and not isdir(dirname): - _bypass_ensure_directory(dirname) - try: - mkdir(dirname, 0o755) - except FileExistsError: - pass - - -def split_sections(s): - """Split a string or iterable thereof into (section, content) pairs - - Each ``section`` is a stripped version of the section header ("[section]") - and each ``content`` is a list of stripped lines excluding blank lines and - comment-only lines. If there are any such lines before the first section - header, they're returned in a first ``section`` of ``None``. - """ - section = None - content = [] - for line in yield_lines(s): - if line.startswith("["): - if line.endswith("]"): - if section or content: - yield section, content - section = line[1:-1].strip() - content = [] - else: - raise ValueError("Invalid section heading", line) - else: - content.append(line) - - # wrap up last segment - yield section, content - - -def _mkstemp(*args, **kw): - old_open = os.open - try: - # temporarily bypass sandboxing - os.open = os_open - return tempfile.mkstemp(*args, **kw) - finally: - # and then put it back - os.open = old_open - - -# Silence the PEP440Warning by default, so that end users don't get hit by it -# randomly just because they use pkg_resources. We want to append the rule -# because we want earlier uses of filterwarnings to take precedence over this -# one. -warnings.filterwarnings("ignore", category=PEP440Warning, append=True) - - -# from jaraco.functools 1.3 -def _call_aside(f, *args, **kwargs): - f(*args, **kwargs) - return f - - -@_call_aside -def _initialize(g=globals()): - "Set up global resource manager (deliberately not state-saved)" - manager = ResourceManager() - g['_manager'] = manager - g.update( - (name, getattr(manager, name)) - for name in dir(manager) - if not name.startswith('_') - ) - - -class PkgResourcesDeprecationWarning(Warning): - """ - Base class for warning about deprecations in ``pkg_resources`` - - This class is not derived from ``DeprecationWarning``, and as such is - visible by default. - """ - - -@_call_aside -def _initialize_master_working_set(): - """ - Prepare the master working set and make the ``require()`` - API available. - - This function has explicit effects on the global state - of pkg_resources. It is intended to be invoked once at - the initialization of this module. - - Invocation by other packages is unsupported and done - at their own risk. 
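# ---- Editorial aside (illustration only, not part of the deleted file) ----
# Sketch of the module-level API that _initialize_master_working_set() wires
# up below: pkg_resources.working_set mirrors sys.path, require() resolves a
# requirement against it, and iter_entry_points() walks advertised entry
# points. "setuptools" is just an example of a distribution that is usually
# present; require() raises DistributionNotFound otherwise.
import pkg_resources

dists = pkg_resources.require("setuptools")
print(dists[0].project_name, dists[0].version)
for ep in pkg_resources.iter_entry_points("console_scripts"):
    print("first console script entry point:", ep.name)
    break
# ----------------------------------------------------------------------------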
- """ - working_set = WorkingSet._build_master() - _declare_state('object', working_set=working_set) - - require = working_set.require - iter_entry_points = working_set.iter_entry_points - add_activation_listener = working_set.subscribe - run_script = working_set.run_script - # backward compatibility - run_main = run_script - # Activate all distributions already on sys.path with replace=False and - # ensure that all distributions added to the working set in the future - # (e.g. by calling ``require()``) will get activated as well, - # with higher priority (replace=True). - tuple( - dist.activate(replace=False) - for dist in working_set - ) - add_activation_listener( - lambda dist: dist.activate(replace=True), - existing=False, - ) - working_set.entries = [] - # match order - list(map(working_set.add_entry, sys.path)) - globals().update(locals()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/appdirs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/appdirs.py deleted file mode 100755 index ae67001..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/appdirs.py +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 3) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. 
-
-    Typical user data directories are:
-        Mac OS X:               ~/Library/Application Support/<AppName>
-        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
-        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
-        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
-        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
-        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
-    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
-    That means, by default "~/.local/share/<AppName>".
-    """
-    if system == "win32":
-        if appauthor is None:
-            appauthor = appname
-        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
-        path = os.path.normpath(_get_win_folder(const))
-        if appname:
-            if appauthor is not False:
-                path = os.path.join(path, appauthor, appname)
-            else:
-                path = os.path.join(path, appname)
-    elif system == 'darwin':
-        path = os.path.expanduser('~/Library/Application Support/')
-        if appname:
-            path = os.path.join(path, appname)
-    else:
-        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
-    r"""Return full path to the user-shared data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "multipath" is an optional parameter only applicable to *nix
-            which indicates that the entire list of data dirs should be
-            returned. By default, the first item from XDG_DATA_DIRS is
-            returned, or '/usr/local/share/<AppName>',
-            if XDG_DATA_DIRS is not set
-
-    Typical site data directories are:
-        Mac OS X:   /Library/Application Support/<AppName>
-        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
-        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
-        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
-
-    For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
-    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
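# ---- Editorial aside (illustration only, not part of the deleted file) ----
# The *nix branch of site_data_dir() reduces to this XDG lookup; shown as a
# standalone sketch (the result depends on the caller's environment).
import os

xdg = os.getenv("XDG_DATA_DIRS",
                os.pathsep.join(["/usr/local/share", "/usr/share"]))
pathlist = [os.path.expanduser(p.rstrip(os.sep)) for p in xdg.split(os.pathsep)]
print(pathlist[0])  # first entry wins unless multipath=True
# ----------------------------------------------------------------------------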
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. 
By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/ - Unix: ~/.cache/ (XDG default) - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache - Vista: C:\Users\\AppData\Local\\\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. 
- - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/ - Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs - Vista: C:\Users\\AppData\Local\\\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. 
- """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - import win32com.shell - _get_win_folder = _get_win_folder_with_pywin32 - except ImportError: - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_common.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_common.py deleted file mode 100755 index a12e2c7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_common.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -import pathlib -import tempfile -import functools -import contextlib -import types -import importlib - -from typing import Union, Optional -from .abc import ResourceReader, Traversable - -from ._compat import wrap_spec - -Package = Union[types.ModuleType, str] - - -def files(package): - # type: (Package) -> Traversable - """ - Get a Traversable resource from a package - """ - return from_package(get_package(package)) - - -def get_resource_reader(package): - # type: (types.ModuleType) -> Optional[ResourceReader] - """ - Return the package's loader if it's a ResourceReader. 
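# ---- Editorial aside (illustration only, not part of the deleted file) ----
# The files()/as_file() pair in this vendored _common.py tracks the stdlib
# importlib.resources API (Python 3.9+); a usage sketch with the stdlib
# version, using the stdlib "email" package purely as a stand-in:
from importlib.resources import as_file, files

resource = files("email").joinpath("__init__.py")
with as_file(resource) as path:  # a real filesystem path, temporary if needed
    print(path.read_text()[:40])
# ----------------------------------------------------------------------------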
- """ - # We can't use - # a issubclass() check here because apparently abc.'s __subclasscheck__() - # hook wants to create a weak reference to the object, but - # zipimport.zipimporter does not support weak references, resulting in a - # TypeError. That seems terrible. - spec = package.__spec__ - reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore - if reader is None: - return None - return reader(spec.name) # type: ignore - - -def resolve(cand): - # type: (Package) -> types.ModuleType - return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand) - - -def get_package(package): - # type: (Package) -> types.ModuleType - """Take a package name or module object and return the module. - - Raise an exception if the resolved module is not a package. - """ - resolved = resolve(package) - if wrap_spec(resolved).submodule_search_locations is None: - raise TypeError(f'{package!r} is not a package') - return resolved - - -def from_package(package): - """ - Return a Traversable object for the given package. - - """ - spec = wrap_spec(package) - reader = spec.loader.get_resource_reader(spec.name) - return reader.files() - - -@contextlib.contextmanager -def _tempfile(reader, suffix=''): - # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' - # blocks due to the need to close the temporary file to work on Windows - # properly. - fd, raw_path = tempfile.mkstemp(suffix=suffix) - try: - try: - os.write(fd, reader()) - finally: - os.close(fd) - del reader - yield pathlib.Path(raw_path) - finally: - try: - os.remove(raw_path) - except FileNotFoundError: - pass - - -@functools.singledispatch -def as_file(path): - """ - Given a Traversable object, return that object as a - path on the local file system in a context manager. - """ - return _tempfile(path.read_bytes, suffix=path.name) - - -@as_file.register(pathlib.Path) -@contextlib.contextmanager -def _(path): - """ - Degenerate behavior for pathlib.Path objects. - """ - yield path diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_itertools.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_itertools.py deleted file mode 100755 index cce0558..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/importlib_resources/_itertools.py +++ /dev/null @@ -1,35 +0,0 @@ -from itertools import filterfalse - -from typing import ( - Callable, - Iterable, - Iterator, - Optional, - Set, - TypeVar, - Union, -) - -# Type and type variable definitions -_T = TypeVar('_T') -_U = TypeVar('_U') - - -def unique_everseen( - iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None -) -> Iterator[_T]: - "List unique elements, preserving order. Remember all elements ever seen." 
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D - # unique_everseen('ABBCcAD', str.lower) --> A B C D - seen: Set[Union[_T, _U]] = set() - seen_add = seen.add - if key is None: - for element in filterfalse(seen.__contains__, iterable): - seen_add(element) - yield element - else: - for element in iterable: - k = key(element) - if k not in seen: - seen_add(k) - yield element diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/context.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/context.py deleted file mode 100755 index 87a4e3d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/context.py +++ /dev/null @@ -1,213 +0,0 @@ -import os -import subprocess -import contextlib -import functools -import tempfile -import shutil -import operator - - -@contextlib.contextmanager -def pushd(dir): - orig = os.getcwd() - os.chdir(dir) - try: - yield dir - finally: - os.chdir(orig) - - -@contextlib.contextmanager -def tarball_context(url, target_dir=None, runner=None, pushd=pushd): - """ - Get a tarball, extract it, change to that directory, yield, then - clean up. - `runner` is the function to invoke commands. - `pushd` is a context manager for changing the directory. - """ - if target_dir is None: - target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') - if runner is None: - runner = functools.partial(subprocess.check_call, shell=True) - # In the tar command, use --strip-components=1 to strip the first path and - # then - # use -C to cause the files to be extracted to {target_dir}. This ensures - # that we always know where the files were extracted. - runner('mkdir {target_dir}'.format(**vars())) - try: - getter = 'wget {url} -O -' - extract = 'tar x{compression} --strip-components=1 -C {target_dir}' - cmd = ' | '.join((getter, extract)) - runner(cmd.format(compression=infer_compression(url), **vars())) - with pushd(target_dir): - yield target_dir - finally: - runner('rm -Rf {target_dir}'.format(**vars())) - - -def infer_compression(url): - """ - Given a URL or filename, infer the compression code for tar. - """ - # cheat and just assume it's the last two characters - compression_indicator = url[-2:] - mapping = dict(gz='z', bz='j', xz='J') - # Assume 'z' (gzip) if no match - return mapping.get(compression_indicator, 'z') - - -@contextlib.contextmanager -def temp_dir(remover=shutil.rmtree): - """ - Create a temporary directory context. Pass a custom remover - to override the removal behavior. - """ - temp_dir = tempfile.mkdtemp() - try: - yield temp_dir - finally: - remover(temp_dir) - - -@contextlib.contextmanager -def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): - """ - Check out the repo indicated by url. - - If dest_ctx is supplied, it should be a context manager - to yield the target directory for the check out. - """ - exe = 'git' if 'git' in url else 'hg' - with dest_ctx() as repo_dir: - cmd = [exe, 'clone', url, repo_dir] - if branch: - cmd.extend(['--branch', branch]) - devnull = open(os.path.devnull, 'w') - stdout = devnull if quiet else None - subprocess.check_call(cmd, stdout=stdout) - yield repo_dir - - -@contextlib.contextmanager -def null(): - yield - - -class ExceptionTrap: - """ - A context manager that will catch certain exceptions and provide an - indication they occurred. - - >>> with ExceptionTrap() as trap: - ... 
raise Exception() - >>> bool(trap) - True - - >>> with ExceptionTrap() as trap: - ... pass - >>> bool(trap) - False - - >>> with ExceptionTrap(ValueError) as trap: - ... raise ValueError("1 + 1 is not 3") - >>> bool(trap) - True - - >>> with ExceptionTrap(ValueError) as trap: - ... raise Exception() - Traceback (most recent call last): - ... - Exception - - >>> bool(trap) - False - """ - - exc_info = None, None, None - - def __init__(self, exceptions=(Exception,)): - self.exceptions = exceptions - - def __enter__(self): - return self - - @property - def type(self): - return self.exc_info[0] - - @property - def value(self): - return self.exc_info[1] - - @property - def tb(self): - return self.exc_info[2] - - def __exit__(self, *exc_info): - type = exc_info[0] - matches = type and issubclass(type, self.exceptions) - if matches: - self.exc_info = exc_info - return matches - - def __bool__(self): - return bool(self.type) - - def raises(self, func, *, _test=bool): - """ - Wrap func and replace the result with the truth - value of the trap (True if an exception occurred). - - First, give the decorator an alias to support Python 3.8 - Syntax. - - >>> raises = ExceptionTrap(ValueError).raises - - Now decorate a function that always fails. - - >>> @raises - ... def fail(): - ... raise ValueError('failed') - >>> fail() - True - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - with ExceptionTrap(self.exceptions) as trap: - func(*args, **kwargs) - return _test(trap) - - return wrapper - - def passes(self, func): - """ - Wrap func and replace the result with the truth - value of the trap (True if no exception). - - First, give the decorator an alias to support Python 3.8 - Syntax. - - >>> passes = ExceptionTrap(ValueError).passes - - Now decorate a function that always fails. - - >>> @passes - ... def fail(): - ... raise ValueError('failed') - - >>> fail() - False - """ - return self.raises(func, _test=operator.not_) - - -class suppress(contextlib.suppress, contextlib.ContextDecorator): - """ - A version of contextlib.suppress with decorator support. - - >>> @suppress(KeyError) - ... def key_error(): - ... {}[''] - >>> key_error() - """ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/functools.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/functools.py deleted file mode 100755 index a3fea3a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/functools.py +++ /dev/null @@ -1,525 +0,0 @@ -import functools -import time -import inspect -import collections -import types -import itertools - -import pkg_resources.extern.more_itertools - -from typing import Callable, TypeVar - - -CallableT = TypeVar("CallableT", bound=Callable[..., object]) - - -def compose(*funcs): - """ - Compose any number of unary functions into a single unary function. - - >>> import textwrap - >>> expected = str.strip(textwrap.dedent(compose.__doc__)) - >>> strip_and_dedent = compose(str.strip, textwrap.dedent) - >>> strip_and_dedent(compose.__doc__) == expected - True - - Compose also allows the innermost function to take arbitrary arguments. 
- - >>> round_three = lambda x: round(x, ndigits=3) - >>> f = compose(round_three, int.__truediv__) - >>> [f(3*x, x+1) for x in range(1,10)] - [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7] - """ - - def compose_two(f1, f2): - return lambda *args, **kwargs: f1(f2(*args, **kwargs)) - - return functools.reduce(compose_two, funcs) - - -def method_caller(method_name, *args, **kwargs): - """ - Return a function that will call a named method on the - target object with optional positional and keyword - arguments. - - >>> lower = method_caller('lower') - >>> lower('MyString') - 'mystring' - """ - - def call_method(target): - func = getattr(target, method_name) - return func(*args, **kwargs) - - return call_method - - -def once(func): - """ - Decorate func so it's only ever called the first time. - - This decorator can ensure that an expensive or non-idempotent function - will not be expensive on subsequent calls and is idempotent. - - >>> add_three = once(lambda a: a+3) - >>> add_three(3) - 6 - >>> add_three(9) - 6 - >>> add_three('12') - 6 - - To reset the stored value, simply clear the property ``saved_result``. - - >>> del add_three.saved_result - >>> add_three(9) - 12 - >>> add_three(8) - 12 - - Or invoke 'reset()' on it. - - >>> add_three.reset() - >>> add_three(-3) - 0 - >>> add_three(0) - 0 - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - if not hasattr(wrapper, 'saved_result'): - wrapper.saved_result = func(*args, **kwargs) - return wrapper.saved_result - - wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result') - return wrapper - - -def method_cache( - method: CallableT, - cache_wrapper: Callable[ - [CallableT], CallableT - ] = functools.lru_cache(), # type: ignore[assignment] -) -> CallableT: - """ - Wrap lru_cache to support storing the cache data in the object instances. - - Abstracts the common paradigm where the method explicitly saves an - underscore-prefixed protected property on first call and returns that - subsequently. - - >>> class MyClass: - ... calls = 0 - ... - ... @method_cache - ... def method(self, value): - ... self.calls += 1 - ... return value - - >>> a = MyClass() - >>> a.method(3) - 3 - >>> for x in range(75): - ... res = a.method(x) - >>> a.calls - 75 - - Note that the apparent behavior will be exactly like that of lru_cache - except that the cache is stored on each instance, so values in one - instance will not flush values from another, and when an instance is - deleted, so are the cached values for that instance. - - >>> b = MyClass() - >>> for x in range(35): - ... res = b.method(x) - >>> b.calls - 35 - >>> a.method(0) - 0 - >>> a.calls - 75 - - Note that if method had been decorated with ``functools.lru_cache()``, - a.calls would have been 76 (due to the cached value of 0 having been - flushed by the 'b' instance). - - Clear the cache with ``.cache_clear()`` - - >>> a.method.cache_clear() - - Same for a method that hasn't yet been called. - - >>> c = MyClass() - >>> c.method.cache_clear() - - Another cache wrapper may be supplied: - - >>> cache = functools.lru_cache(maxsize=2) - >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) - >>> a = MyClass() - >>> a.method2() - 3 - - Caution - do not subsequently wrap the method with another decorator, such - as ``@property``, which changes the semantics of the function. - - See also - http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ - for another implementation and additional justification. 
- """ - - def wrapper(self: object, *args: object, **kwargs: object) -> object: - # it's the first call, replace the method with a cached, bound method - bound_method: CallableT = types.MethodType( # type: ignore[assignment] - method, self - ) - cached_method = cache_wrapper(bound_method) - setattr(self, method.__name__, cached_method) - return cached_method(*args, **kwargs) - - # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None # type: ignore[attr-defined] - - return ( # type: ignore[return-value] - _special_method_cache(method, cache_wrapper) or wrapper - ) - - -def _special_method_cache(method, cache_wrapper): - """ - Because Python treats special methods differently, it's not - possible to use instance attributes to implement the cached - methods. - - Instead, install the wrapper method under a different name - and return a simple proxy to that wrapper. - - https://github.com/jaraco/jaraco.functools/issues/5 - """ - name = method.__name__ - special_names = '__getattr__', '__getitem__' - if name not in special_names: - return - - wrapper_name = '__cached' + name - - def proxy(self, *args, **kwargs): - if wrapper_name not in vars(self): - bound = types.MethodType(method, self) - cache = cache_wrapper(bound) - setattr(self, wrapper_name, cache) - else: - cache = getattr(self, wrapper_name) - return cache(*args, **kwargs) - - return proxy - - -def apply(transform): - """ - Decorate a function with a transform function that is - invoked on results returned from the decorated function. - - >>> @apply(reversed) - ... def get_numbers(start): - ... "doc for get_numbers" - ... return range(start, start+3) - >>> list(get_numbers(4)) - [6, 5, 4] - >>> get_numbers.__doc__ - 'doc for get_numbers' - """ - - def wrap(func): - return functools.wraps(func)(compose(transform, func)) - - return wrap - - -def result_invoke(action): - r""" - Decorate a function with an action function that is - invoked on the results returned from the decorated - function (for its side-effect), then return the original - result. - - >>> @result_invoke(print) - ... def add_two(a, b): - ... return a + b - >>> x = add_two(2, 3) - 5 - >>> x - 5 - """ - - def wrap(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - action(result) - return result - - return wrapper - - return wrap - - -def call_aside(f, *args, **kwargs): - """ - Call a function for its side effect after initialization. - - >>> @call_aside - ... def func(): print("called") - called - >>> func() - called - - Use functools.partial to pass parameters to the initial call - - >>> @functools.partial(call_aside, name='bingo') - ... 
def func(name): print("called with", name) - called with bingo - """ - f(*args, **kwargs) - return f - - -class Throttler: - """ - Rate-limit a function (or other callable) - """ - - def __init__(self, func, max_rate=float('Inf')): - if isinstance(func, Throttler): - func = func.func - self.func = func - self.max_rate = max_rate - self.reset() - - def reset(self): - self.last_called = 0 - - def __call__(self, *args, **kwargs): - self._wait() - return self.func(*args, **kwargs) - - def _wait(self): - "ensure at least 1/max_rate seconds from last call" - elapsed = time.time() - self.last_called - must_wait = 1 / self.max_rate - elapsed - time.sleep(max(0, must_wait)) - self.last_called = time.time() - - def __get__(self, obj, type=None): - return first_invoke(self._wait, functools.partial(self.func, obj)) - - -def first_invoke(func1, func2): - """ - Return a function that when invoked will invoke func1 without - any parameters (for its side-effect) and then invoke func2 - with whatever parameters were passed, returning its result. - """ - - def wrapper(*args, **kwargs): - func1() - return func2(*args, **kwargs) - - return wrapper - - -def retry_call(func, cleanup=lambda: None, retries=0, trap=()): - """ - Given a callable func, trap the indicated exceptions - for up to 'retries' times, invoking cleanup on the - exception. On the final attempt, allow any exceptions - to propagate. - """ - attempts = itertools.count() if retries == float('inf') else range(retries) - for attempt in attempts: - try: - return func() - except trap: - cleanup() - - return func() - - -def retry(*r_args, **r_kwargs): - """ - Decorator wrapper for retry_call. Accepts arguments to retry_call - except func and then returns a decorator for the decorated function. - - Ex: - - >>> @retry(retries=3) - ... def my_func(a, b): - ... "this is my funk" - ... print(a, b) - >>> my_func.__doc__ - 'this is my funk' - """ - - def decorate(func): - @functools.wraps(func) - def wrapper(*f_args, **f_kwargs): - bound = functools.partial(func, *f_args, **f_kwargs) - return retry_call(bound, *r_args, **r_kwargs) - - return wrapper - - return decorate - - -def print_yielded(func): - """ - Convert a generator into a function that prints all yielded elements - - >>> @print_yielded - ... def x(): - ... yield 3; yield None - >>> x() - 3 - None - """ - print_all = functools.partial(map, print) - print_results = compose(more_itertools.consume, print_all, func) - return functools.wraps(func)(print_results) - - -def pass_none(func): - """ - Wrap func so it's not called if its first param is None - - >>> print_text = pass_none(print) - >>> print_text('text') - text - >>> print_text(None) - """ - - @functools.wraps(func) - def wrapper(param, *args, **kwargs): - if param is not None: - return func(param, *args, **kwargs) - - return wrapper - - -def assign_params(func, namespace): - """ - Assign parameters from namespace where func solicits. - - >>> def func(x, y=3): - ... print(x, y) - >>> assigned = assign_params(func, dict(x=2, z=4)) - >>> assigned() - 2 3 - - The usual errors are raised if a function doesn't receive - its required parameters: - - >>> assigned = assign_params(func, dict(y=3, z=4)) - >>> assigned() - Traceback (most recent call last): - TypeError: func() ...argument... - - It even works on methods: - - >>> class Handler: - ... def meth(self, arg): - ... 
print(arg) - >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() - crystal - """ - sig = inspect.signature(func) - params = sig.parameters.keys() - call_ns = {k: namespace[k] for k in params if k in namespace} - return functools.partial(func, **call_ns) - - -def save_method_args(method): - """ - Wrap a method such that when it is called, the args and kwargs are - saved on the method. - - >>> class MyClass: - ... @save_method_args - ... def method(self, a, b): - ... print(a, b) - >>> my_ob = MyClass() - >>> my_ob.method(1, 2) - 1 2 - >>> my_ob._saved_method.args - (1, 2) - >>> my_ob._saved_method.kwargs - {} - >>> my_ob.method(a=3, b='foo') - 3 foo - >>> my_ob._saved_method.args - () - >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') - True - - The arguments are stored on the instance, allowing for - different instance to save different args. - - >>> your_ob = MyClass() - >>> your_ob.method({str('x'): 3}, b=[4]) - {'x': 3} [4] - >>> your_ob._saved_method.args - ({'x': 3},) - >>> my_ob._saved_method.args - () - """ - args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') - - @functools.wraps(method) - def wrapper(self, *args, **kwargs): - attr_name = '_saved_' + method.__name__ - attr = args_and_kwargs(args, kwargs) - setattr(self, attr_name, attr) - return method(self, *args, **kwargs) - - return wrapper - - -def except_(*exceptions, replace=None, use=None): - """ - Replace the indicated exceptions, if raised, with the indicated - literal replacement or evaluated expression (if present). - - >>> safe_int = except_(ValueError)(int) - >>> safe_int('five') - >>> safe_int('5') - 5 - - Specify a literal replacement with ``replace``. - - >>> safe_int_r = except_(ValueError, replace=0)(int) - >>> safe_int_r('five') - 0 - - Provide an expression to ``use`` to pass through particular parameters. - - >>> safe_int_pt = except_(ValueError, use='args[0]')(int) - >>> safe_int_pt('five') - 'five' - - """ - - def decorate(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except exceptions: - try: - return eval(use) - except TypeError: - return replace - - return wrapper - - return decorate diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/text/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/text/__init__.py deleted file mode 100755 index c466378..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/jaraco/text/__init__.py +++ /dev/null @@ -1,599 +0,0 @@ -import re -import itertools -import textwrap -import functools - -try: - from importlib.resources import files # type: ignore -except ImportError: # pragma: nocover - from pkg_resources.extern.importlib_resources import files # type: ignore - -from pkg_resources.extern.jaraco.functools import compose, method_cache -from pkg_resources.extern.jaraco.context import ExceptionTrap - - -def substitution(old, new): - """ - Return a function that will perform a substitution on a string - """ - return lambda s: s.replace(old, new) - - -def multi_substitution(*substitutions): - """ - Take a sequence of pairs specifying substitutions, and create - a function that performs those substitutions. 
- - >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') - 'baz' - """ - substitutions = itertools.starmap(substitution, substitutions) - # compose function applies last function first, so reverse the - # substitutions to get the expected order. - substitutions = reversed(tuple(substitutions)) - return compose(*substitutions) - - -class FoldedCase(str): - """ - A case insensitive string class; behaves just like str - except compares equal when the only variation is case. - - >>> s = FoldedCase('hello world') - - >>> s == 'Hello World' - True - - >>> 'Hello World' == s - True - - >>> s != 'Hello World' - False - - >>> s.index('O') - 4 - - >>> s.split('O') - ['hell', ' w', 'rld'] - - >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) - ['alpha', 'Beta', 'GAMMA'] - - Sequence membership is straightforward. - - >>> "Hello World" in [s] - True - >>> s in ["Hello World"] - True - - You may test for set inclusion, but candidate and elements - must both be folded. - - >>> FoldedCase("Hello World") in {s} - True - >>> s in {FoldedCase("Hello World")} - True - - String inclusion works as long as the FoldedCase object - is on the right. - - >>> "hello" in FoldedCase("Hello World") - True - - But not if the FoldedCase object is on the left: - - >>> FoldedCase('hello') in 'Hello World' - False - - In that case, use ``in_``: - - >>> FoldedCase('hello').in_('Hello World') - True - - >>> FoldedCase('hello') > FoldedCase('Hello') - False - """ - - def __lt__(self, other): - return self.lower() < other.lower() - - def __gt__(self, other): - return self.lower() > other.lower() - - def __eq__(self, other): - return self.lower() == other.lower() - - def __ne__(self, other): - return self.lower() != other.lower() - - def __hash__(self): - return hash(self.lower()) - - def __contains__(self, other): - return super().lower().__contains__(other.lower()) - - def in_(self, other): - "Does self appear in other?" - return self in FoldedCase(other) - - # cache lower since it's likely to be called frequently. - @method_cache - def lower(self): - return super().lower() - - def index(self, sub): - return self.lower().index(sub.lower()) - - def split(self, splitter=' ', maxsplit=0): - pattern = re.compile(re.escape(splitter), re.I) - return pattern.split(self, maxsplit) - - -# Python 3.8 compatibility -_unicode_trap = ExceptionTrap(UnicodeDecodeError) - - -@_unicode_trap.passes -def is_decodable(value): - r""" - Return True if the supplied value is decodable (using the default - encoding). - - >>> is_decodable(b'\xff') - False - >>> is_decodable(b'\x32') - True - """ - value.decode() - - -def is_binary(value): - r""" - Return True if the value appears to be binary (that is, it's a byte - string and isn't decodable). - - >>> is_binary(b'\xff') - True - >>> is_binary('\xff') - False - """ - return isinstance(value, bytes) and not is_decodable(value) - - -def trim(s): - r""" - Trim something like a docstring to remove the whitespace that - is common due to indentation and formatting. - - >>> trim("\n\tfoo = bar\n\t\tbar = baz\n") - 'foo = bar\n\tbar = baz' - """ - return textwrap.dedent(s).strip() - - -def wrap(s): - """ - Wrap lines of text, retaining existing newlines as - paragraph markers. - - >>> print(wrap(lorem_ipsum)) - Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do - eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad - minim veniam, quis nostrud exercitation ullamco laboris nisi ut - aliquip ex ea commodo consequat. 
Duis aute irure dolor in - reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla - pariatur. Excepteur sint occaecat cupidatat non proident, sunt in - culpa qui officia deserunt mollit anim id est laborum. - - Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam - varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus - magna felis sollicitudin mauris. Integer in mauris eu nibh euismod - gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis - risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, - eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas - fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla - a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, - neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing - sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque - nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus - quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, - molestie eu, feugiat in, orci. In hac habitasse platea dictumst. - """ - paragraphs = s.splitlines() - wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs) - return '\n\n'.join(wrapped) - - -def unwrap(s): - r""" - Given a multi-line string, return an unwrapped version. - - >>> wrapped = wrap(lorem_ipsum) - >>> wrapped.count('\n') - 20 - >>> unwrapped = unwrap(wrapped) - >>> unwrapped.count('\n') - 1 - >>> print(unwrapped) - Lorem ipsum dolor sit amet, consectetur adipiscing ... - Curabitur pretium tincidunt lacus. Nulla gravida orci ... - - """ - paragraphs = re.split(r'\n\n+', s) - cleaned = (para.replace('\n', ' ') for para in paragraphs) - return '\n'.join(cleaned) - - - - -class Splitter(object): - """object that will split a string with the given arguments for each call - - >>> s = Splitter(',') - >>> s('hello, world, this is your, master calling') - ['hello', ' world', ' this is your', ' master calling'] - """ - - def __init__(self, *args): - self.args = args - - def __call__(self, s): - return s.split(*self.args) - - -def indent(string, prefix=' ' * 4): - """ - >>> indent('foo') - ' foo' - """ - return prefix + string - - -class WordSet(tuple): - """ - Given an identifier, return the words that identifier represents, - whether in camel case, underscore-separated, etc. - - >>> WordSet.parse("camelCase") - ('camel', 'Case') - - >>> WordSet.parse("under_sep") - ('under', 'sep') - - Acronyms should be retained - - >>> WordSet.parse("firstSNL") - ('first', 'SNL') - - >>> WordSet.parse("you_and_I") - ('you', 'and', 'I') - - >>> WordSet.parse("A simple test") - ('A', 'simple', 'test') - - Multiple caps should not interfere with the first cap of another word. - - >>> WordSet.parse("myABCClass") - ('my', 'ABC', 'Class') - - The result is a WordSet, so you can get the form you need. - - >>> WordSet.parse("myABCClass").underscore_separated() - 'my_ABC_Class' - - >>> WordSet.parse('a-command').camel_case() - 'ACommand' - - >>> WordSet.parse('someIdentifier').lowered().space_separated() - 'some identifier' - - Slices of the result should return another WordSet. 
-
-    >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
-    'out_of_context'
-
-    >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
-    'word set'
-
-    >>> example = WordSet.parse('figured it out')
-    >>> example.headless_camel_case()
-    'figuredItOut'
-    >>> example.dash_separated()
-    'figured-it-out'
-
-    """
-
-    _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
-
-    def capitalized(self):
-        return WordSet(word.capitalize() for word in self)
-
-    def lowered(self):
-        return WordSet(word.lower() for word in self)
-
-    def camel_case(self):
-        return ''.join(self.capitalized())
-
-    def headless_camel_case(self):
-        words = iter(self)
-        first = next(words).lower()
-        new_words = itertools.chain((first,), WordSet(words).camel_case())
-        return ''.join(new_words)
-
-    def underscore_separated(self):
-        return '_'.join(self)
-
-    def dash_separated(self):
-        return '-'.join(self)
-
-    def space_separated(self):
-        return ' '.join(self)
-
-    def trim_right(self, item):
-        """
-        Remove the item from the end of the set.
-
-        >>> WordSet.parse('foo bar').trim_right('foo')
-        ('foo', 'bar')
-        >>> WordSet.parse('foo bar').trim_right('bar')
-        ('foo',)
-        >>> WordSet.parse('').trim_right('bar')
-        ()
-        """
-        return self[:-1] if self and self[-1] == item else self
-
-    def trim_left(self, item):
-        """
-        Remove the item from the beginning of the set.
-
-        >>> WordSet.parse('foo bar').trim_left('foo')
-        ('bar',)
-        >>> WordSet.parse('foo bar').trim_left('bar')
-        ('foo', 'bar')
-        >>> WordSet.parse('').trim_left('bar')
-        ()
-        """
-        return self[1:] if self and self[0] == item else self
-
-    def trim(self, item):
-        """
-        >>> WordSet.parse('foo bar').trim('foo')
-        ('bar',)
-        """
-        return self.trim_left(item).trim_right(item)
-
-    def __getitem__(self, item):
-        result = super(WordSet, self).__getitem__(item)
-        if isinstance(item, slice):
-            result = WordSet(result)
-        return result
-
-    @classmethod
-    def parse(cls, identifier):
-        matches = cls._pattern.finditer(identifier)
-        return WordSet(match.group(0) for match in matches)
-
-    @classmethod
-    def from_class_name(cls, subject):
-        return cls.parse(subject.__class__.__name__)
-
-
-# for backward compatibility
-words = WordSet.parse
-
-
-def simple_html_strip(s):
-    r"""
-    Remove HTML from the string `s`.
-
-    >>> str(simple_html_strip(''))
-    ''
-
-    >>> print(simple_html_strip('A <b>stormy</b> day in paradise'))
-    A stormy day in paradise
-
-    >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
-    Somebody  tell the truth.
-
-    >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
-    What about
-    multiple lines?
-    """
-    html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
-    texts = (match.group(3) or '' for match in html_stripper.finditer(s))
-    return ''.join(texts)
-
-
-class SeparatedValues(str):
-    """
-    A string separated by a separator. Overrides __iter__ for getting
-    the values.
-
-    >>> list(SeparatedValues('a,b,c'))
-    ['a', 'b', 'c']
-
-    Whitespace is stripped and empty values are discarded.
-
-    >>> list(SeparatedValues(' a,   b , c,  '))
-    ['a', 'b', 'c']
-    """
-
-    separator = ','
-
-    def __iter__(self):
-        parts = self.split(self.separator)
-        return filter(None, (part.strip() for part in parts))
-
-
-class Stripper:
-    r"""
-    Given a series of lines, find the common prefix and strip it from them.
-
-    >>> lines = [
-    ...     'abcdefg\n',
-    ...     'abc\n',
-    ...     'abcde\n',
-    ... ]
-    >>> res = Stripper.strip_prefix(lines)
-    >>> res.prefix
-    'abc'
-    >>> list(res.lines)
-    ['defg\n', '\n', 'de\n']
-
-    If no prefix is common, nothing should be stripped.
-
-    >>> lines = [
-    ...     'abcd\n',
-    ...     '1234\n',
-    ... ]
-    >>> res = Stripper.strip_prefix(lines)
-    >>> res.prefix
-    ''
-    >>> list(res.lines)
-    ['abcd\n', '1234\n']
-    """
-
-    def __init__(self, prefix, lines):
-        self.prefix = prefix
-        self.lines = map(self, lines)
-
-    @classmethod
-    def strip_prefix(cls, lines):
-        prefix_lines, lines = itertools.tee(lines)
-        prefix = functools.reduce(cls.common_prefix, prefix_lines)
-        return cls(prefix, lines)
-
-    def __call__(self, line):
-        if not self.prefix:
-            return line
-        null, prefix, rest = line.partition(self.prefix)
-        return rest
-
-    @staticmethod
-    def common_prefix(s1, s2):
-        """
-        Return the common prefix of two lines.
-        """
-        index = min(len(s1), len(s2))
-        while s1[:index] != s2[:index]:
-            index -= 1
-        return s1[:index]
-
-
-def remove_prefix(text, prefix):
-    """
-    Remove the prefix from the text if it exists.
-
-    >>> remove_prefix('underwhelming performance', 'underwhelming ')
-    'performance'
-
-    >>> remove_prefix('something special', 'sample')
-    'something special'
-    """
-    null, prefix, rest = text.rpartition(prefix)
-    return rest
-
-
-def remove_suffix(text, suffix):
-    """
-    Remove the suffix from the text if it exists.
-
-    >>> remove_suffix('name.git', '.git')
-    'name'
-
-    >>> remove_suffix('something special', 'sample')
-    'something special'
-    """
-    rest, suffix, null = text.partition(suffix)
-    return rest
-
-
-def normalize_newlines(text):
-    r"""
-    Replace alternate newlines with the canonical newline.
-
-    >>> normalize_newlines('Lorem Ipsum\u2029')
-    'Lorem Ipsum\n'
-    >>> normalize_newlines('Lorem Ipsum\r\n')
-    'Lorem Ipsum\n'
-    >>> normalize_newlines('Lorem Ipsum\x85')
-    'Lorem Ipsum\n'
-    """
-    newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
-    pattern = '|'.join(newlines)
-    return re.sub(pattern, '\n', text)
-
-
-def _nonblank(str):
-    return str and not str.startswith('#')
-
-
-@functools.singledispatch
-def yield_lines(iterable):
-    r"""
-    Yield valid lines of a string or iterable.
-
-    >>> list(yield_lines(''))
-    []
-    >>> list(yield_lines(['foo', 'bar']))
-    ['foo', 'bar']
-    >>> list(yield_lines('foo\nbar'))
-    ['foo', 'bar']
-    >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
-    ['foo', 'baz #comment']
-    >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
-    ['foo', 'bar', 'baz', 'bing']
-    """
-    return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
-@yield_lines.register(str)
-def _(text):
-    return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
-def drop_comment(line):
-    """
-    Drop comments.
-
-    >>> drop_comment('foo # bar')
-    'foo'
-
-    A hash without a space may be in a URL.
-
-    >>> drop_comment('http://example.com/foo#bar')
-    'http://example.com/foo#bar'
-    """
-    return line.partition(' #')[0]
-
-
-def join_continuation(lines):
-    r"""
-    Join lines continued by a trailing backslash.
-
-    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-    ['foobar', 'baz']
-    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-    ['foobar', 'baz']
-    >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
-    ['foobarbaz']
-
-    Not sure why, but...
-    The character preceding the backslash is also elided.
-
-    >>> list(join_continuation(['goo\\', 'dly']))
-    ['godly']
-
-    A terrible idea, but...
-    If no line is available to continue, suppress the lines.
-
-    >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
-    ['foo']
-    """
-    lines = iter(lines)
-    for item in lines:
-        while item.endswith('\\'):
-            try:
-                item = item[:-2].strip() + next(lines)
-            except StopIteration:
-                return
-        yield item
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/__init__.py
deleted file mode 100755
index ea38bef..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .more import *  # noqa
-from .recipes import *  # noqa
-
-__version__ = '8.12.0'
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/more.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/more.py
deleted file mode 100755
index 630af97..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/more.py
+++ /dev/null
@@ -1,4317 +0,0 @@
-import warnings
-
-from collections import Counter, defaultdict, deque, abc
-from collections.abc import Sequence
-from concurrent.futures import ThreadPoolExecutor
-from functools import partial, reduce, wraps
-from heapq import merge, heapify, heapreplace, heappop
-from itertools import (
-    chain,
-    compress,
-    count,
-    cycle,
-    dropwhile,
-    groupby,
-    islice,
-    repeat,
-    starmap,
-    takewhile,
-    tee,
-    zip_longest,
-)
-from math import exp, factorial, floor, log
-from queue import Empty, Queue
-from random import random, randrange, uniform
-from operator import itemgetter, mul, sub, gt, lt, ge, le
-from sys import hexversion, maxsize
-from time import monotonic
-
-from .recipes import (
-    consume,
-    flatten,
-    pairwise,
-    powerset,
-    take,
-    unique_everseen,
-)
-
-__all__ = [
-    'AbortThread',
-    'SequenceView',
-    'UnequalIterablesError',
-    'adjacent',
-    'all_unique',
-    'always_iterable',
-    'always_reversible',
-    'bucket',
-    'callback_iter',
-    'chunked',
-    'chunked_even',
-    'circular_shifts',
-    'collapse',
-    'collate',
-    'combination_index',
-    'consecutive_groups',
-    'consumer',
-    'count_cycle',
-    'countable',
-    'difference',
-    'distinct_combinations',
-    'distinct_permutations',
-    'distribute',
-    'divide',
-    'duplicates_everseen',
-    'duplicates_justseen',
-    'exactly_n',
-    'filter_except',
-    'first',
-    'groupby_transform',
-    'ichunked',
-    'ilen',
-    'interleave',
-    'interleave_evenly',
-    'interleave_longest',
-    'intersperse',
-    'is_sorted',
-    'islice_extended',
-    'iterate',
-    'last',
-    'locate',
-    'lstrip',
-    'make_decorator',
-    'map_except',
-    'map_if',
-    'map_reduce',
-    'mark_ends',
-    'minmax',
-    'nth_or_last',
-    'nth_permutation',
-    'nth_product',
-    'numeric_range',
-    'one',
-    'only',
-    'padded',
-    'partitions',
-    'peekable',
-    'permutation_index',
-    'product_index',
-    'raise_',
-    'repeat_each',
-    'repeat_last',
-    'replace',
-    'rlocate',
-    'rstrip',
-    'run_length',
-    'sample',
-    'seekable',
-    'set_partitions',
-    'side_effect',
-    'sliced',
-    'sort_together',
-    'split_after',
-    'split_at',
-    'split_before',
-    'split_into',
-    'split_when',
-    'spy',
-    'stagger',
-    'strip',
-    'strictly_n',
-    'substrings',
-    'substrings_indexes',
-    'time_limited',
-    'unique_in_window',
-    'unique_to_each',
-    'unzip',
-    'value_chain',
-    'windowed',
-    'windowed_complete',
-    'with_iter',
-    'zip_broadcast',
-    'zip_equal',
-    'zip_offset',
-]
-
-
-_marker = object()
-
-
-def chunked(iterable, n, strict=False):
-    """Break *iterable* into lists of length *n*:
-
-    >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
-    [[1, 2, 3], [4, 5, 6]]
-
-    By default, the last yielded list will have fewer than *n* elements
-    if the length of *iterable* is not divisible by *n*:
-
-    >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
-    [[1, 2, 3], [4, 5, 6], [7, 8]]
-
-    To use a fill-in value instead, see the :func:`grouper` recipe.
-
-    If the length of *iterable* is not divisible by *n* and *strict* is
-    ``True``, then ``ValueError`` will be raised before the last
-    list is yielded.
-
-    """
-    iterator = iter(partial(take, n, iter(iterable)), [])
-    if strict:
-        if n is None:
-            raise ValueError('n must not be None when using strict mode.')
-
-        def ret():
-            for chunk in iterator:
-                if len(chunk) != n:
-                    raise ValueError('iterable is not divisible by n.')
-                yield chunk
-
-        return iter(ret())
-    else:
-        return iterator
-
-
-def first(iterable, default=_marker):
-    """Return the first item of *iterable*, or *default* if *iterable* is
-    empty.
-
-    >>> first([0, 1, 2, 3])
-    0
-    >>> first([], 'some default')
-    'some default'
-
-    If *default* is not provided and there are no items in the iterable,
-    raise ``ValueError``.
-
-    :func:`first` is useful when you have a generator of expensive-to-retrieve
-    values and want any arbitrary one. It is marginally shorter than
-    ``next(iter(iterable), default)``.
-
-    """
-    try:
-        return next(iter(iterable))
-    except StopIteration as e:
-        if default is _marker:
-            raise ValueError(
-                'first() was called on an empty iterable, and no '
-                'default value was provided.'
-            ) from e
-        return default
-
-
-def last(iterable, default=_marker):
-    """Return the last item of *iterable*, or *default* if *iterable* is
-    empty.
-
-    >>> last([0, 1, 2, 3])
-    3
-    >>> last([], 'some default')
-    'some default'
-
-    If *default* is not provided and there are no items in the iterable,
-    raise ``ValueError``.
-    """
-    try:
-        if isinstance(iterable, Sequence):
-            return iterable[-1]
-        # Work around https://bugs.python.org/issue38525
-        elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
-            return next(reversed(iterable))
-        else:
-            return deque(iterable, maxlen=1)[-1]
-    except (IndexError, TypeError, StopIteration):
-        if default is _marker:
-            raise ValueError(
-                'last() was called on an empty iterable, and no default was '
-                'provided.'
-            )
-        return default
-
-
-def nth_or_last(iterable, n, default=_marker):
-    """Return the nth or the last item of *iterable*,
-    or *default* if *iterable* is empty.
- - >>> nth_or_last([0, 1, 2, 3], 2) - 2 - >>> nth_or_last([0, 1], 2) - 1 - >>> nth_or_last([], 0, 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - return last(islice(iterable, n + 1), default=default) - - -class peekable: - """Wrap an iterator to allow lookahead and prepending elements. - - Call :meth:`peek` on the result to get the value that will be returned - by :func:`next`. This won't advance the iterator: - - >>> p = peekable(['a', 'b']) - >>> p.peek() - 'a' - >>> next(p) - 'a' - - Pass :meth:`peek` a default value to return that instead of raising - ``StopIteration`` when the iterator is exhausted. - - >>> p = peekable([]) - >>> p.peek('hi') - 'hi' - - peekables also offer a :meth:`prepend` method, which "inserts" items - at the head of the iterable: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> p.peek() - 11 - >>> list(p) - [11, 12, 1, 2, 3] - - peekables can be indexed. Index 0 is the item that will be returned by - :func:`next`, index 1 is the item after that, and so on: - The values up to the given index will be cached. - - >>> p = peekable(['a', 'b', 'c', 'd']) - >>> p[0] - 'a' - >>> p[1] - 'b' - >>> next(p) - 'a' - - Negative indexes are supported, but be aware that they will cache the - remaining items in the source iterator, which may require significant - storage. - - To check whether a peekable is exhausted, check its truth value: - - >>> p = peekable(['a', 'b']) - >>> if p: # peekable has items - ... list(p) - ['a', 'b'] - >>> if not p: # peekable is exhausted - ... list(p) - [] - - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self._cache = deque() - - def __iter__(self): - return self - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - """Return the item that will be next returned from ``next()``. - - Return ``default`` if there are no items left. If ``default`` is not - provided, raise ``StopIteration``. - - """ - if not self._cache: - try: - self._cache.append(next(self._it)) - except StopIteration: - if default is _marker: - raise - return default - return self._cache[0] - - def prepend(self, *items): - """Stack up items to be the next ones returned from ``next()`` or - ``self.peek()``. The items will be returned in - first in, first out order:: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> list(p) - [11, 12, 1, 2, 3] - - It is possible, by prepending items, to "resurrect" a peekable that - previously raised ``StopIteration``. - - >>> p = peekable([]) - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - >>> p.prepend(1) - >>> next(p) - 1 - >>> next(p) - Traceback (most recent call last): - ... 
-        StopIteration
-
-        """
-        self._cache.extendleft(reversed(items))
-
-    def __next__(self):
-        if self._cache:
-            return self._cache.popleft()
-
-        return next(self._it)
-
-    def _get_slice(self, index):
-        # Normalize the slice's arguments
-        step = 1 if (index.step is None) else index.step
-        if step > 0:
-            start = 0 if (index.start is None) else index.start
-            stop = maxsize if (index.stop is None) else index.stop
-        elif step < 0:
-            start = -1 if (index.start is None) else index.start
-            stop = (-maxsize - 1) if (index.stop is None) else index.stop
-        else:
-            raise ValueError('slice step cannot be zero')
-
-        # If either the start or stop index is negative, we'll need to cache
-        # the rest of the iterable in order to slice from the right side.
-        if (start < 0) or (stop < 0):
-            self._cache.extend(self._it)
-        # Otherwise we'll need to find the rightmost index and cache to that
-        # point.
-        else:
-            n = min(max(start, stop) + 1, maxsize)
-            cache_len = len(self._cache)
-            if n >= cache_len:
-                self._cache.extend(islice(self._it, n - cache_len))
-
-        return list(self._cache)[index]
-
-    def __getitem__(self, index):
-        if isinstance(index, slice):
-            return self._get_slice(index)
-
-        cache_len = len(self._cache)
-        if index < 0:
-            self._cache.extend(self._it)
-        elif index >= cache_len:
-            self._cache.extend(islice(self._it, index + 1 - cache_len))
-
-        return self._cache[index]
-
-
-def collate(*iterables, **kwargs):
-    """Return a sorted merge of the items from each of several already-sorted
-    *iterables*.
-
-    >>> list(collate('ACDZ', 'AZ', 'JKL'))
-    ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
-
-    Works lazily, keeping only the next value from each iterable in memory. Use
-    :func:`collate` to, for example, perform an n-way mergesort of items that
-    don't fit in memory.
-
-    If a *key* function is specified, the iterables will be sorted according
-    to its result:
-
-    >>> key = lambda s: int(s)  # Sort by numeric value, not by string
-    >>> list(collate(['1', '10'], ['2', '11'], key=key))
-    ['1', '2', '10', '11']
-
-
-    If the *iterables* are sorted in descending order, set *reverse* to
-    ``True``:
-
-    >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
-    [5, 4, 3, 2, 1, 0]
-
-    If the elements of the passed-in iterables are out of order, you might get
-    unexpected results.
-
-    On Python 3.5+, this function is an alias for :func:`heapq.merge`.
-
-    """
-    warnings.warn(
-        "collate is no longer part of more_itertools, use heapq.merge",
-        DeprecationWarning,
-    )
-    return merge(*iterables, **kwargs)
-
-
-def consumer(func):
-    """Decorator that automatically advances a PEP-342-style "reverse iterator"
-    to its first yield point so you don't have to call ``next()`` on it
-    manually.
-
-    >>> @consumer
-    ... def tally():
-    ...     i = 0
-    ...     while True:
-    ...         print('Thing number %s is %s.' % (i, (yield)))
-    ...         i += 1
-    ...
-    >>> t = tally()
-    >>> t.send('red')
-    Thing number 0 is red.
-    >>> t.send('fish')
-    Thing number 1 is fish.
-
-    Without the decorator, you would have to call ``next(t)`` before
-    ``t.send()`` could be used.
-
-    """
-
-    @wraps(func)
-    def wrapper(*args, **kwargs):
-        gen = func(*args, **kwargs)
-        next(gen)
-        return gen
-
-    return wrapper
-
-
-def ilen(iterable):
-    """Return the number of items in *iterable*.
-
-    >>> ilen(x for x in range(1000000) if x % 3 == 0)
-    333334
-
-    This consumes the iterable, so handle with care.
-
-    """
-    # This approach was selected because benchmarks showed it's likely the
-    # fastest of the known implementations at the time of writing.
-    # See GitHub tracker: #236, #230.
-    counter = count()
-    deque(zip(iterable, counter), maxlen=0)
-    return next(counter)
-
-
-def iterate(func, start):
-    """Return ``start``, ``func(start)``, ``func(func(start))``, ...
-
-    >>> from itertools import islice
-    >>> list(islice(iterate(lambda x: 2*x, 1), 10))
-    [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
-    """
-    while True:
-        yield start
-        start = func(start)
-
-
-def with_iter(context_manager):
-    """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
-
-    For example, this will close the file when the iterator is exhausted::
-
-        upper_lines = (line.upper() for line in with_iter(open('foo')))
-
-    Any context manager which returns an iterable is a candidate for
-    ``with_iter``.
-
-    """
-    with context_manager as iterable:
-        yield from iterable
-
-
-def one(iterable, too_short=None, too_long=None):
-    """Return the first item from *iterable*, which is expected to contain only
-    that item. Raise an exception if *iterable* is empty or has more than one
-    item.
-
-    :func:`one` is useful for ensuring that an iterable contains only one item.
-    For example, it can be used to retrieve the result of a database query
-    that is expected to return a single row.
-
-    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
-    different exception with the *too_short* keyword:
-
-    >>> it = []
-    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    ValueError: too few items in iterable (expected 1)
-    >>> too_short = IndexError('too few items')
-    >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    IndexError: too few items
-
-    Similarly, if *iterable* contains more than one item, ``ValueError`` will
-    be raised. You may specify a different exception with the *too_long*
-    keyword:
-
-    >>> it = ['too', 'many']
-    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    ValueError: Expected exactly one item in iterable, but got 'too',
-    'many', and perhaps more.
-    >>> too_long = RuntimeError
-    >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    RuntimeError
-
-    Note that :func:`one` attempts to advance *iterable* twice to ensure there
-    is only one item. See :func:`spy` or :func:`peekable` to check iterable
-    contents less destructively.
-
-    """
-    it = iter(iterable)
-
-    try:
-        first_value = next(it)
-    except StopIteration as e:
-        raise (
-            too_short or ValueError('too few items in iterable (expected 1)')
-        ) from e
-
-    try:
-        second_value = next(it)
-    except StopIteration:
-        pass
-    else:
-        msg = (
-            'Expected exactly one item in iterable, but got {!r}, {!r}, '
-            'and perhaps more.'.format(first_value, second_value)
-        )
-        raise too_long or ValueError(msg)
-
-    return first_value
-
-
-def raise_(exception, *args):
-    raise exception(*args)
-
-
-def strictly_n(iterable, n, too_short=None, too_long=None):
-    """Validate that *iterable* has exactly *n* items and return them if
-    it does. If it has fewer than *n* items, call function *too_short*
-    with those items. If it has more than *n* items, call function
-    *too_long* with the first ``n + 1`` items.
-
-    >>> iterable = ['a', 'b', 'c', 'd']
-    >>> n = 4
-    >>> list(strictly_n(iterable, n))
-    ['a', 'b', 'c', 'd']
-
-    By default, *too_short* and *too_long* are functions that raise
-    ``ValueError``.
-
-    >>> list(strictly_n('ab', 3))  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    ValueError: too few items in iterable (got 2)
-
-    >>> list(strictly_n('abc', 2))  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    ValueError: too many items in iterable (got at least 3)
-
-    You can instead supply functions that do something else.
-    *too_short* will be called with the number of items in *iterable*.
-    *too_long* will be called with `n + 1`.
-
-    >>> def too_short(item_count):
-    ...     raise RuntimeError
-    >>> it = strictly_n('abcd', 6, too_short=too_short)
-    >>> list(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    RuntimeError
-
-    >>> def too_long(item_count):
-    ...     print('The boss is going to hear about this')
-    >>> it = strictly_n('abcdef', 4, too_long=too_long)
-    >>> list(it)
-    The boss is going to hear about this
-    ['a', 'b', 'c', 'd']
-
-    """
-    if too_short is None:
-        too_short = lambda item_count: raise_(
-            ValueError,
-            'Too few items in iterable (got {})'.format(item_count),
-        )
-
-    if too_long is None:
-        too_long = lambda item_count: raise_(
-            ValueError,
-            'Too many items in iterable (got at least {})'.format(item_count),
-        )
-
-    it = iter(iterable)
-    for i in range(n):
-        try:
-            item = next(it)
-        except StopIteration:
-            too_short(i)
-            return
-        else:
-            yield item
-
-    try:
-        next(it)
-    except StopIteration:
-        pass
-    else:
-        too_long(n + 1)
-
-
-def distinct_permutations(iterable, r=None):
-    """Yield successive distinct permutations of the elements in *iterable*.
-
-    >>> sorted(distinct_permutations([1, 0, 1]))
-    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
-
-    Equivalent to ``set(permutations(iterable))``, except duplicates are not
-    generated and thrown away. For larger input sequences this is much more
-    efficient.
-
-    Duplicate permutations arise when there are duplicated elements in the
-    input iterable. The number of items returned is
-    `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
-    items input, and each `x_i` is the count of a distinct item in the input
-    sequence.
-
-    If *r* is given, only the *r*-length permutations are yielded.
-
-    >>> sorted(distinct_permutations([1, 0, 1], r=2))
-    [(0, 1), (1, 0), (1, 1)]
-    >>> sorted(distinct_permutations(range(3), r=2))
-    [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
-
-    """
-    # Algorithm: https://w.wiki/Qai
-    def _full(A):
-        while True:
-            # Yield the permutation we have
-            yield tuple(A)
-
-            # Find the largest index i such that A[i] < A[i + 1]
-            for i in range(size - 2, -1, -1):
-                if A[i] < A[i + 1]:
-                    break
-            # If no such index exists, this permutation is the last one
-            else:
-                return
-
-            # Find the largest index j greater than i such that A[i] < A[j]
-            for j in range(size - 1, i, -1):
-                if A[i] < A[j]:
-                    break
-
-            # Swap the value of A[i] with that of A[j], then reverse the
-            # sequence from A[i + 1] to form the new permutation
-            A[i], A[j] = A[j], A[i]
-            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]
-
-    # Algorithm: modified from the above
-    def _partial(A, r):
-        # Split A into the first r items and the remaining items
-        head, tail = A[:r], A[r:]
-        right_head_indexes = range(r - 1, -1, -1)
-        left_tail_indexes = range(len(tail))
-
-        while True:
-            # Yield the permutation we have
-            yield tuple(head)
-
-            # Starting from the right, find the first index of the head with
-            # value smaller than the maximum value of the tail - call it i.
- pivot = tail[-1] - for i in right_head_indexes: - if head[i] < pivot: - break - pivot = head[i] - else: - return - - # Starting from the left, find the first value of the tail - # with a value greater than head[i] and swap. - for j in left_tail_indexes: - if tail[j] > head[i]: - head[i], tail[j] = tail[j], head[i] - break - # If we didn't find one, start from the right and find the first - # index of the head with a value greater than head[i] and swap. - else: - for j in right_head_indexes: - if head[j] > head[i]: - head[i], head[j] = head[j], head[i] - break - - # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] - tail += head[: i - r : -1] # head[i + 1:][::-1] - i += 1 - head[i:], tail[:] = tail[: r - i], tail[r - i :] - - items = sorted(iterable) - - size = len(items) - if r is None: - r = size - - if 0 < r <= size: - return _full(items) if (r == size) else _partial(items, r) - - return iter(() if r else ((),)) - - -def intersperse(e, iterable, n=1): - """Intersperse filler element *e* among the items in *iterable*, leaving - *n* items between each filler element. - - >>> list(intersperse('!', [1, 2, 3, 4, 5])) - [1, '!', 2, '!', 3, '!', 4, '!', 5] - - >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) - [1, 2, None, 3, 4, None, 5] - - """ - if n == 0: - raise ValueError('n must be > 0') - elif n == 1: - # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2... - # islice(..., 1, None) -> x_0, e, x_1, e, x_2... - return islice(interleave(repeat(e), iterable), 1, None) - else: - # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... - # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... - # flatten(...) -> x_0, x_1, e, x_2, x_3... - filler = repeat([e]) - chunks = chunked(iterable, n) - return flatten(islice(interleave(filler, chunks), 1, None)) - - -def unique_to_each(*iterables): - """Return the elements from each of the input iterables that aren't in the - other input iterables. - - For example, suppose you have a set of packages, each with a set of - dependencies:: - - {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} - - If you remove one package, which dependencies can also be removed? - - If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not - associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for - ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: - - >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) - [['A'], ['C'], ['D']] - - If there are duplicates in one input iterable that aren't in the others - they will be duplicated in the output. Input order is preserved:: - - >>> unique_to_each("mississippi", "missouri") - [['p', 'p'], ['o', 'u', 'r']] - - It is assumed that the elements of each iterable are hashable. - - """ - pool = [list(it) for it in iterables] - counts = Counter(chain.from_iterable(map(set, pool))) - uniques = {element for element in counts if counts[element] == 1} - return [list(filter(uniques.__contains__, it)) for it in pool] - - -def windowed(seq, n, fillvalue=None, step=1): - """Return a sliding window of width *n* over the given iterable. 
-
-    >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
-    >>> list(all_windows)
-    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
-
-    When the window is larger than the iterable, *fillvalue* is used in place
-    of missing values:
-
-    >>> list(windowed([1, 2, 3], 4))
-    [(1, 2, 3, None)]
-
-    Each window will advance in increments of *step*:
-
-    >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
-    [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
-
-    To slide into the iterable's items, use :func:`chain` to add filler items
-    to the left:
-
-    >>> iterable = [1, 2, 3, 4]
-    >>> n = 3
-    >>> padding = [None] * (n - 1)
-    >>> list(windowed(chain(padding, iterable), 3))
-    [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
-    """
-    if n < 0:
-        raise ValueError('n must be >= 0')
-    if n == 0:
-        yield tuple()
-        return
-    if step < 1:
-        raise ValueError('step must be >= 1')
-
-    window = deque(maxlen=n)
-    i = n
-    for _ in map(window.append, seq):
-        i -= 1
-        if not i:
-            i = step
-            yield tuple(window)
-
-    size = len(window)
-    if size < n:
-        yield tuple(chain(window, repeat(fillvalue, n - size)))
-    elif 0 < i < min(step, n):
-        window += (fillvalue,) * i
-        yield tuple(window)
-
-
-def substrings(iterable):
-    """Yield all of the substrings of *iterable*.
-
-    >>> [''.join(s) for s in substrings('more')]
-    ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
-
-    Note that non-string iterables can also be subdivided.
-
-    >>> list(substrings([0, 1, 2]))
-    [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
-
-    """
-    # The length-1 substrings
-    seq = []
-    for item in iter(iterable):
-        seq.append(item)
-        yield (item,)
-    seq = tuple(seq)
-    item_count = len(seq)
-
-    # And the rest
-    for n in range(2, item_count + 1):
-        for i in range(item_count - n + 1):
-            yield seq[i : i + n]
-
-
-def substrings_indexes(seq, reverse=False):
-    """Yield all substrings and their positions in *seq*
-
-    The items yielded will be a tuple of the form ``(substr, i, j)``, where
-    ``substr == seq[i:j]``.
-
-    This function only works for iterables that support slicing, such as
-    ``str`` objects.
-
-    >>> for item in substrings_indexes('more'):
-    ...    print(item)
-    ('m', 0, 1)
-    ('o', 1, 2)
-    ('r', 2, 3)
-    ('e', 3, 4)
-    ('mo', 0, 2)
-    ('or', 1, 3)
-    ('re', 2, 4)
-    ('mor', 0, 3)
-    ('ore', 1, 4)
-    ('more', 0, 4)
-
-    Set *reverse* to ``True`` to yield the same items in the opposite order.
-
-
-    """
-    r = range(1, len(seq) + 1)
-    if reverse:
-        r = reversed(r)
-    return (
-        (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
-    )
-
-
-class bucket:
-    """Wrap *iterable* and return an object that buckets it into child
-    iterables based on a *key* function.
-
-    >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
-    >>> s = bucket(iterable, key=lambda x: x[0])  # Bucket by 1st character
-    >>> sorted(list(s))  # Get the keys
-    ['a', 'b', 'c']
-    >>> a_iterable = s['a']
-    >>> next(a_iterable)
-    'a1'
-    >>> next(a_iterable)
-    'a2'
-    >>> list(s['b'])
-    ['b1', 'b2', 'b3']
-
-    The original iterable will be advanced and its items will be cached until
-    they are used by the child iterables. This may require significant storage.
-
-    By default, attempting to select a bucket to which no items belong will
-    exhaust the iterable and cache all values.
-    If you specify a *validator* function, selected buckets will instead be
-    checked against it.
- - >>> from itertools import count - >>> it = count(1, 2) # Infinite sequence of odd numbers - >>> key = lambda x: x % 10 # Bucket by last digit - >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only - >>> s = bucket(it, key=key, validator=validator) - >>> 2 in s - False - >>> list(s[2]) - [] - - """ - - def __init__(self, iterable, key, validator=None): - self._it = iter(iterable) - self._key = key - self._cache = defaultdict(deque) - self._validator = validator or (lambda x: True) - - def __contains__(self, value): - if not self._validator(value): - return False - - try: - item = next(self[value]) - except StopIteration: - return False - else: - self._cache[value].appendleft(item) - - return True - - def _get_values(self, value): - """ - Helper to yield items from the parent iterator that match *value*. - Items that don't match are stored in the local cache as they - are encountered. - """ - while True: - # If we've cached some items that match the target value, emit - # the first one and evict it from the cache. - if self._cache[value]: - yield self._cache[value].popleft() - # Otherwise we need to advance the parent iterator to search for - # a matching item, caching the rest. - else: - while True: - try: - item = next(self._it) - except StopIteration: - return - item_value = self._key(item) - if item_value == value: - yield item - break - elif self._validator(item_value): - self._cache[item_value].append(item) - - def __iter__(self): - for item in self._it: - item_value = self._key(item) - if self._validator(item_value): - self._cache[item_value].append(item) - - yield from self._cache.keys() - - def __getitem__(self, value): - if not self._validator(value): - return iter(()) - - return self._get_values(value) - - -def spy(iterable, n=1): - """Return a 2-tuple with a list containing the first *n* elements of - *iterable*, and an iterator with the same items as *iterable*. - This allows you to "look ahead" at the items in the iterable without - advancing it. - - There is one item in the list by default: - - >>> iterable = 'abcdefg' - >>> head, iterable = spy(iterable) - >>> head - ['a'] - >>> list(iterable) - ['a', 'b', 'c', 'd', 'e', 'f', 'g'] - - You may use unpacking to retrieve items instead of lists: - - >>> (head,), iterable = spy('abcdefg') - >>> head - 'a' - >>> (first, second), iterable = spy('abcdefg', 2) - >>> first - 'a' - >>> second - 'b' - - The number of items requested can be larger than the number of items in - the iterable: - - >>> iterable = [1, 2, 3, 4, 5] - >>> head, iterable = spy(iterable, 10) - >>> head - [1, 2, 3, 4, 5] - >>> list(iterable) - [1, 2, 3, 4, 5] - - """ - it = iter(iterable) - head = take(n, it) - - return head.copy(), chain(head, it) - - -def interleave(*iterables): - """Return a new iterable yielding from each iterable in turn, - until the shortest is exhausted. - - >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7] - - For a version that doesn't terminate after the shortest iterable is - exhausted, see :func:`interleave_longest`. - - """ - return chain.from_iterable(zip(*iterables)) - - -def interleave_longest(*iterables): - """Return a new iterable yielding from each iterable in turn, - skipping any that are exhausted. - - >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7, 3, 8] - - This function produces the same output as :func:`roundrobin`, but may - perform better for some inputs (in particular when the number of iterables - is large). 
- - """ - i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) - return (x for x in i if x is not _marker) - - -def interleave_evenly(iterables, lengths=None): - """ - Interleave multiple iterables so that their elements are evenly distributed - throughout the output sequence. - - >>> iterables = [1, 2, 3, 4, 5], ['a', 'b'] - >>> list(interleave_evenly(iterables)) - [1, 2, 'a', 3, 4, 'b', 5] - - >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]] - >>> list(interleave_evenly(iterables)) - [1, 6, 4, 2, 7, 3, 8, 5] - - This function requires iterables of known length. Iterables without - ``__len__()`` can be used by manually specifying lengths with *lengths*: - - >>> from itertools import combinations, repeat - >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']] - >>> lengths = [4 * (4 - 1) // 2, 3] - >>> list(interleave_evenly(iterables, lengths=lengths)) - [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c'] - - Based on Bresenham's algorithm. - """ - if lengths is None: - try: - lengths = [len(it) for it in iterables] - except TypeError: - raise ValueError( - 'Iterable lengths could not be determined automatically. ' - 'Specify them with the lengths keyword.' - ) - elif len(iterables) != len(lengths): - raise ValueError('Mismatching number of iterables and lengths.') - - dims = len(lengths) - - # sort iterables by length, descending - lengths_permute = sorted( - range(dims), key=lambda i: lengths[i], reverse=True - ) - lengths_desc = [lengths[i] for i in lengths_permute] - iters_desc = [iter(iterables[i]) for i in lengths_permute] - - # the longest iterable is the primary one (Bresenham: the longest - # distance along an axis) - delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:] - iter_primary, iters_secondary = iters_desc[0], iters_desc[1:] - errors = [delta_primary // dims] * len(deltas_secondary) - - to_yield = sum(lengths) - while to_yield: - yield next(iter_primary) - to_yield -= 1 - # update errors for each secondary iterable - errors = [e - delta for e, delta in zip(errors, deltas_secondary)] - - # those iterables for which the error is negative are yielded - # ("diagonal step" in Bresenham) - for i, e in enumerate(errors): - if e < 0: - yield next(iters_secondary[i]) - to_yield -= 1 - errors[i] += delta_primary - - -def collapse(iterable, base_type=None, levels=None): - """Flatten an iterable with multiple levels of nesting (e.g., a list of - lists of tuples) into non-iterable types. - - >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] - >>> list(collapse(iterable)) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and - will not be collapsed. 
-
-    To avoid collapsing other types, specify *base_type*:
-
-    >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
-    >>> list(collapse(iterable, base_type=tuple))
-    ['ab', ('cd', 'ef'), 'gh', 'ij']
-
-    Specify *levels* to stop flattening after a certain level:
-
-    >>> iterable = [('a', ['b']), ('c', ['d'])]
-    >>> list(collapse(iterable))  # Fully flattened
-    ['a', 'b', 'c', 'd']
-    >>> list(collapse(iterable, levels=1))  # Only one level flattened
-    ['a', ['b'], 'c', ['d']]
-
-    """
-
-    def walk(node, level):
-        if (
-            ((levels is not None) and (level > levels))
-            or isinstance(node, (str, bytes))
-            or ((base_type is not None) and isinstance(node, base_type))
-        ):
-            yield node
-            return
-
-        try:
-            tree = iter(node)
-        except TypeError:
-            yield node
-            return
-        else:
-            for child in tree:
-                yield from walk(child, level + 1)
-
-    yield from walk(iterable, 0)
-
-
-def side_effect(func, iterable, chunk_size=None, before=None, after=None):
-    """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
-    of items) before yielding the item.
-
-    `func` must be a function that takes a single argument. Its return value
-    will be discarded.
-
-    *before* and *after* are optional functions that take no arguments. They
-    will be executed before iteration starts and after it ends, respectively.
-
-    `side_effect` can be used for logging, updating progress bars, or anything
-    that is not functionally "pure."
-
-    Emitting a status message:
-
-    >>> from more_itertools import consume
-    >>> func = lambda item: print('Received {}'.format(item))
-    >>> consume(side_effect(func, range(2)))
-    Received 0
-    Received 1
-
-    Operating on chunks of items:
-
-    >>> pair_sums = []
-    >>> func = lambda chunk: pair_sums.append(sum(chunk))
-    >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
-    [0, 1, 2, 3, 4, 5]
-    >>> list(pair_sums)
-    [1, 5, 9]
-
-    Writing to a file-like object:
-
-    >>> from io import StringIO
-    >>> from more_itertools import consume
-    >>> f = StringIO()
-    >>> func = lambda x: print(x, file=f)
-    >>> before = lambda: print(u'HEADER', file=f)
-    >>> after = f.close
-    >>> it = [u'a', u'b', u'c']
-    >>> consume(side_effect(func, it, before=before, after=after))
-    >>> f.closed
-    True
-
-    """
-    try:
-        if before is not None:
-            before()
-
-        if chunk_size is None:
-            for item in iterable:
-                func(item)
-                yield item
-        else:
-            for chunk in chunked(iterable, chunk_size):
-                func(chunk)
-                yield from chunk
-    finally:
-        if after is not None:
-            after()
-
-
-def sliced(seq, n, strict=False):
-    """Yield slices of length *n* from the sequence *seq*.
-
-    >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
-    [(1, 2, 3), (4, 5, 6)]
-
-    By default, the last yielded slice will have fewer than *n* elements
-    if the length of *seq* is not divisible by *n*:
-
-    >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
-    [(1, 2, 3), (4, 5, 6), (7, 8)]
-
-    If the length of *seq* is not divisible by *n* and *strict* is
-    ``True``, then ``ValueError`` will be raised before the last
-    slice is yielded.
-
-    This function will only work for iterables that support slicing.
-    For non-sliceable iterables, see :func:`chunked`.
- - """ - iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) - if strict: - - def ret(): - for _slice in iterator: - if len(_slice) != n: - raise ValueError("seq is not divisible by n.") - yield _slice - - return iter(ret()) - else: - return iterator - - -def split_at(iterable, pred, maxsplit=-1, keep_separator=False): - """Yield lists of items from *iterable*, where each list is delimited by - an item where callable *pred* returns ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b')) - [['a'], ['c', 'd', 'c'], ['a']] - - >>> list(split_at(range(10), lambda n: n % 2 == 1)) - [[0], [2], [4], [6], [8], []] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) - [[0], [2], [4, 5, 6, 7, 8, 9]] - - By default, the delimiting items are not included in the output. - The include them, set *keep_separator* to ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) - [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item): - yield buf - if keep_separator: - yield [item] - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - else: - buf.append(item) - yield buf - - -def split_before(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends just before - an item for which callable *pred* returns ``True``: - - >>> list(split_before('OneTwo', lambda s: s.isupper())) - [['O', 'n', 'e'], ['T', 'w', 'o']] - - >>> list(split_before(range(10), lambda n: n % 3 == 0)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield [item] + list(it) - return - buf = [] - maxsplit -= 1 - buf.append(item) - if buf: - yield buf - - -def split_after(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends with an - item where callable *pred* returns ``True``: - - >>> list(split_after('one1two2', lambda s: s.isdigit())) - [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] - - >>> list(split_after(range(10), lambda n: n % 3 == 0)) - [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - buf.append(item) - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - if buf: - yield buf - - -def split_when(iterable, pred, maxsplit=-1): - """Split *iterable* into pieces based on the output of *pred*. - *pred* should be a function that takes successive pairs of items and - returns ``True`` if the iterable should be split in between them. 
-
-    For example, to find runs of increasing numbers, split the iterable when
-    element ``i`` is larger than element ``i + 1``:
-
-    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
-    [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
-    ...                 lambda x, y: x > y, maxsplit=2))
-    [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
-
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    it = iter(iterable)
-    try:
-        cur_item = next(it)
-    except StopIteration:
-        return
-
-    buf = [cur_item]
-    for next_item in it:
-        if pred(cur_item, next_item):
-            yield buf
-            if maxsplit == 1:
-                yield [next_item] + list(it)
-                return
-            buf = []
-            maxsplit -= 1
-
-        buf.append(next_item)
-        cur_item = next_item
-
-    yield buf
-
-
-def split_into(iterable, sizes):
-    """Yield a list of sequential items from *iterable* of length 'n' for each
-    integer 'n' in *sizes*.
-
-    >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
-    [[1], [2, 3], [4, 5, 6]]
-
-    If the sum of *sizes* is smaller than the length of *iterable*, then the
-    remaining items of *iterable* will not be returned.
-
-    >>> list(split_into([1,2,3,4,5,6], [2,3]))
-    [[1, 2], [3, 4, 5]]
-
-    If the sum of *sizes* is larger than the length of *iterable*, fewer items
-    will be returned in the iteration that overruns *iterable* and further
-    lists will be empty:
-
-    >>> list(split_into([1,2,3,4], [1,2,3,4]))
-    [[1], [2, 3], [4], []]
-
-    When a ``None`` object is encountered in *sizes*, the returned list will
-    contain items up to the end of *iterable* the same way that
-    itertools.islice does:
-
-    >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
-    [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
-
-    :func:`split_into` can be useful for grouping a series of items where the
-    sizes of the groups are not uniform. An example would be where in a row
-    from a table, multiple columns represent elements of the same feature
-    (e.g. a point represented by x,y,z) but the format is not the same for
-    all columns.
-    """
-    # convert the iterable argument into an iterator so its contents can
-    # be consumed by islice in case it is a generator
-    it = iter(iterable)
-
-    for size in sizes:
-        if size is None:
-            yield list(it)
-            return
-        else:
-            yield list(islice(it, size))
-
-
-def padded(iterable, fillvalue=None, n=None, next_multiple=False):
-    """Yield the elements from *iterable*, followed by *fillvalue*, such that
-    at least *n* items are emitted.
-
-    >>> list(padded([1, 2, 3], '?', 5))
-    [1, 2, 3, '?', '?']
-
-    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
-    number of items emitted is a multiple of *n*::
-
-    >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
-    [1, 2, 3, 4, None, None]
-
-    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
-
-    """
-    it = iter(iterable)
-    if n is None:
-        yield from chain(it, repeat(fillvalue))
-    elif n < 1:
-        raise ValueError('n must be at least 1')
-    else:
-        item_count = 0
-        for item in it:
-            yield item
-            item_count += 1
-
-        remaining = (n - item_count) % n if next_multiple else n - item_count
-        for _ in range(remaining):
-            yield fillvalue
-
-
-def repeat_each(iterable, n=2):
-    """Repeat each element in *iterable* *n* times.
- - >>> list(repeat_each('ABC', 3)) - ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C'] - """ - return chain.from_iterable(map(repeat, iterable, repeat(n))) - - -def repeat_last(iterable, default=None): - """After the *iterable* is exhausted, keep yielding its last element. - - >>> list(islice(repeat_last(range(3)), 5)) - [0, 1, 2, 2, 2] - - If the iterable is empty, yield *default* forever:: - - >>> list(islice(repeat_last(range(0), 42), 5)) - [42, 42, 42, 42, 42] - - """ - item = _marker - for item in iterable: - yield item - final = default if item is _marker else item - yield from repeat(final) - - -def distribute(n, iterable): - """Distribute the items from *iterable* among *n* smaller iterables. - - >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 3, 5] - >>> list(group_2) - [2, 4, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 4, 7], [2, 5], [3, 6]] - - If the length of *iterable* is smaller than *n*, then the last returned - iterables will be empty: - - >>> children = distribute(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function uses :func:`itertools.tee` and may require significant - storage. If you need the order items in the smaller iterables to match the - original iterable, see :func:`divide`. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - children = tee(iterable, n) - return [islice(it, index, None, n) for index, it in enumerate(children)] - - -def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): - """Yield tuples whose elements are offset from *iterable*. - The amount by which the `i`-th item in each tuple is offset is given by - the `i`-th item in *offsets*. - - >>> list(stagger([0, 1, 2, 3])) - [(None, 0, 1), (0, 1, 2), (1, 2, 3)] - >>> list(stagger(range(8), offsets=(0, 2, 4))) - [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)] - - By default, the sequence will end when the final element of a tuple is the - last item in the iterable. To continue until the first element of a tuple - is the last item in the iterable, set *longest* to ``True``:: - - >>> list(stagger([0, 1, 2, 3], longest=True)) - [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - children = tee(iterable, len(offsets)) - - return zip_offset( - *children, offsets=offsets, longest=longest, fillvalue=fillvalue - ) - - -class UnequalIterablesError(ValueError): - def __init__(self, details=None): - msg = 'Iterables have different lengths' - if details is not None: - msg += (': index 0 has length {}; index {} has length {}').format( - *details - ) - - super().__init__(msg) - - -def _zip_equal_generator(iterables): - for combo in zip_longest(*iterables, fillvalue=_marker): - for val in combo: - if val is _marker: - raise UnequalIterablesError() - yield combo - - -def _zip_equal(*iterables): - # Check whether the iterables are all the same size. - try: - first_size = len(iterables[0]) - for i, it in enumerate(iterables[1:], 1): - size = len(it) - if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. 
- raise UnequalIterablesError(details=(first_size, i, size)) - # If any one of the iterables didn't have a length, start reading - # them until one runs out. - except TypeError: - return _zip_equal_generator(iterables) - - -def zip_equal(*iterables): - """``zip`` the input *iterables* together, but raise - ``UnequalIterablesError`` if they aren't all the same length. - - >>> it_1 = range(3) - >>> it_2 = iter('abc') - >>> list(zip_equal(it_1, it_2)) - [(0, 'a'), (1, 'b'), (2, 'c')] - - >>> it_1 = range(3) - >>> it_2 = iter('abcd') - >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - more_itertools.more.UnequalIterablesError: Iterables have different - lengths - - """ - if hexversion >= 0x30A00A6: - warnings.warn( - ( - 'zip_equal will be removed in a future version of ' - 'more-itertools. Use the builtin zip function with ' - 'strict=True instead.' - ), - DeprecationWarning, - ) - - return _zip_equal(*iterables) - - -def zip_offset(*iterables, offsets, longest=False, fillvalue=None): - """``zip`` the input *iterables* together, but offset the `i`-th iterable - by the `i`-th item in *offsets*. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] - - This can be used as a lightweight alternative to SciPy or pandas to analyze - data sets in which some series have a lead or lag relationship. - - By default, the sequence will end when the shortest iterable is exhausted. - To continue until the longest iterable is exhausted, set *longest* to - ``True``. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - if len(iterables) != len(offsets): - raise ValueError("Number of iterables and offsets didn't match") - - staggered = [] - for it, n in zip(iterables, offsets): - if n < 0: - staggered.append(chain(repeat(fillvalue, -n), it)) - elif n > 0: - staggered.append(islice(it, n, None)) - else: - staggered.append(it) - - if longest: - return zip_longest(*staggered, fillvalue=fillvalue) - - return zip(*staggered) - - -def sort_together(iterables, key_list=(0,), key=None, reverse=False): - """Return the input iterables sorted together, with *key_list* as the - priority for sorting. All iterables are trimmed to the length of the - shortest one. - - This can be used like the sorting function in a spreadsheet. If each - iterable represents a column of data, the key list determines which - columns are used for sorting. - - By default, all iterables are sorted using the ``0``-th iterable:: - - >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] - >>> sort_together(iterables) - [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] - - Set a different key list to sort according to another iterable. - Specifying multiple keys dictates how ties are broken:: - - >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] - >>> sort_together(iterables, key_list=(1, 2)) - [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] - - To sort by a function of the elements of the iterable, pass a *key* - function. Its arguments are the elements of the iterables corresponding to - the key list:: - - >>> names = ('a', 'b', 'c') - >>> lengths = (1, 2, 3) - >>> widths = (5, 2, 1) - >>> def area(length, width): - ... 
return length * width - >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) - [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] - - Set *reverse* to ``True`` to sort in descending order. - - >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) - [(3, 2, 1), ('a', 'b', 'c')] - - """ - if key is None: - # if there is no key function, the key argument to sorted is an - # itemgetter - key_argument = itemgetter(*key_list) - else: - # if there is a key function, call it with the items at the offsets - # specified by the key list as arguments - key_list = list(key_list) - if len(key_list) == 1: - # if key_list contains a single item, pass the item at that offset - # as the only argument to the key function - key_offset = key_list[0] - key_argument = lambda zipped_items: key(zipped_items[key_offset]) - else: - # if key_list contains multiple items, use itemgetter to return a - # tuple of items, which we pass as *args to the key function - get_key_items = itemgetter(*key_list) - key_argument = lambda zipped_items: key( - *get_key_items(zipped_items) - ) - - return list( - zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) - ) - - -def unzip(iterable): - """The inverse of :func:`zip`, this function disaggregates the elements - of the zipped *iterable*. - - The ``i``-th iterable contains the ``i``-th element from each element - of the zipped iterable. The first element is used to determine the - length of the remaining elements. - - >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> letters, numbers = unzip(iterable) - >>> list(letters) - ['a', 'b', 'c', 'd'] - >>> list(numbers) - [1, 2, 3, 4] - - This is similar to using ``zip(*iterable)``, but it avoids reading - *iterable* into memory. Note, however, that this function uses - :func:`itertools.tee` and thus may require significant storage. - - """ - head, iterable = spy(iter(iterable)) - if not head: - # empty iterable, e.g. zip([], [], []) - return () - # spy returns a one-length iterable as head - head = head[0] - iterables = tee(iterable, len(head)) - - def itemgetter(i): - def getter(obj): - try: - return obj[i] - except IndexError: - # basically if we have an iterable like - # iter([(1, 2, 3), (4, 5), (6,)]) - # the second unzipped iterable would fail at the third tuple - # since it would try to access tup[1] - # same with the third unzipped iterable and the second tuple - # to support these "improperly zipped" iterables, - # we create a custom itemgetter - # which just stops the unzipped iterables - # at first length mismatch - raise StopIteration - - return getter - - return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) - - -def divide(n, iterable): - """Divide the elements from *iterable* into *n* parts, maintaining - order. - - >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 2, 3] - >>> list(group_2) - [4, 5, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 2, 3], [4, 5], [6, 7]] - - If the length of the iterable is smaller than *n*, then the last returned - iterables will be empty: - - >>> children = divide(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function will exhaust the iterable before returning and may require - significant storage.
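An editorial aside on the custom itemgetter in ``unzip`` above: raising ``StopIteration`` from the mapped getter makes each unzipped iterable simply stop at the first length mismatch. A minimal sketch (assuming this module is importable as ``more_itertools``):

    from more_itertools import unzip

    # Ragged input: the shorter positions end early instead of raising.
    first, second, third = unzip(iter([(1, 2, 3), (4, 5), (6,)]))
    assert list(first) == [1, 4, 6]
    assert list(second) == [2, 5]
    assert list(third) == [3]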
If order is not important, see :func:`distribute`, - which does not first pull the iterable into memory. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - try: - iterable[:0] - except TypeError: - seq = tuple(iterable) - else: - seq = iterable - - q, r = divmod(len(seq), n) - - ret = [] - stop = 0 - for i in range(1, n + 1): - start = stop - stop += q + 1 if i <= r else q - ret.append(iter(seq[start:stop])) - - return ret - - -def always_iterable(obj, base_type=(str, bytes)): - """If *obj* is iterable, return an iterator over its items:: - - >>> obj = (1, 2, 3) - >>> list(always_iterable(obj)) - [1, 2, 3] - - If *obj* is not iterable, return a one-item iterable containing *obj*:: - - >>> obj = 1 - >>> list(always_iterable(obj)) - [1] - - If *obj* is ``None``, return an empty iterable: - - >>> obj = None - >>> list(always_iterable(None)) - [] - - By default, binary and text strings are not considered iterable:: - - >>> obj = 'foo' - >>> list(always_iterable(obj)) - ['foo'] - - If *base_type* is set, objects for which ``isinstance(obj, base_type)`` - returns ``True`` won't be considered iterable. - - >>> obj = {'a': 1} - >>> list(always_iterable(obj)) # Iterate over the dict's keys - ['a'] - >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit - [{'a': 1}] - - Set *base_type* to ``None`` to avoid any special handling and treat objects - Python considers iterable as iterable: - - >>> obj = 'foo' - >>> list(always_iterable(obj, base_type=None)) - ['f', 'o', 'o'] - """ - if obj is None: - return iter(()) - - if (base_type is not None) and isinstance(obj, base_type): - return iter((obj,)) - - try: - return iter(obj) - except TypeError: - return iter((obj,)) - - -def adjacent(predicate, iterable, distance=1): - """Return an iterable over `(bool, item)` tuples where the `item` is - drawn from *iterable* and the `bool` indicates whether - that item satisfies the *predicate* or is adjacent to an item that does. - - For example, to find whether items are adjacent to a ``3``:: - - >>> list(adjacent(lambda x: x == 3, range(6))) - [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] - - Set *distance* to change what counts as adjacent. For example, to find - whether items are two places away from a ``3``: - - >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) - [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] - - This is useful for contextualizing the results of a search function. - For example, a code comparison tool might want to identify lines that - have changed, but also surrounding lines to give the viewer of the diff - context. - - The predicate function will only be called once for each item in the - iterable. - - See also :func:`groupby_transform`, which can be used with this function - to group ranges of items with the same `bool` value. - - """ - # Allow distance=0 mainly for testing that it reproduces results with map() - if distance < 0: - raise ValueError('distance must be at least 0') - - i1, i2 = tee(iterable) - padding = [False] * distance - selected = chain(padding, map(predicate, i1), padding) - adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) - return zip(adjacent_to_selected, i2) - - -def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): - """An extension of :func:`itertools.groupby` that can apply transformations - to the grouped data. 
- - * *keyfunc* is a function computing a key value for each item in *iterable* - * *valuefunc* is a function that transforms the individual items from - *iterable* after grouping - * *reducefunc* is a function that transforms each group of items - - >>> iterable = 'aAAbBBcCC' - >>> keyfunc = lambda k: k.upper() - >>> valuefunc = lambda v: v.lower() - >>> reducefunc = lambda g: ''.join(g) - >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) - [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] - - Each optional argument defaults to an identity function if not specified. - - :func:`groupby_transform` is useful when grouping elements of an iterable - using a separate iterable as the key. To do this, :func:`zip` the iterables - and pass a *keyfunc* that extracts the first element and a *valuefunc* - that extracts the second element:: - - >>> from operator import itemgetter - >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] - >>> values = 'abcdefghi' - >>> iterable = zip(keys, values) - >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) - >>> [(k, ''.join(g)) for k, g in grouper] - [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] - - Note that the order of items in the iterable is significant. - Only adjacent items are grouped together, so if you don't want any - duplicate groups, you should sort the iterable by the key function. - - """ - ret = groupby(iterable, keyfunc) - if valuefunc: - ret = ((k, map(valuefunc, g)) for k, g in ret) - if reducefunc: - ret = ((k, reducefunc(g)) for k, g in ret) - - return ret - - -class numeric_range(abc.Sequence, abc.Hashable): - """An extension of the built-in ``range()`` function whose arguments can - be any orderable numeric type. - - With only *stop* specified, *start* defaults to ``0`` and *step* - defaults to ``1``. The output items will match the type of *stop*: - - >>> list(numeric_range(3.5)) - [0.0, 1.0, 2.0, 3.0] - - With only *start* and *stop* specified, *step* defaults to ``1``. The - output items will match the type of *start*: - - >>> from decimal import Decimal - >>> start = Decimal('2.1') - >>> stop = Decimal('5.1') - >>> list(numeric_range(start, stop)) - [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] - - With *start*, *stop*, and *step* specified the output items will match - the type of ``start + step``: - - >>> from fractions import Fraction - >>> start = Fraction(1, 2) # Start at 1/2 - >>> stop = Fraction(5, 2) # End at 5/2 - >>> step = Fraction(1, 2) # Count by 1/2 - >>> list(numeric_range(start, stop, step)) - [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] - - If *step* is zero, ``ValueError`` is raised. Negative steps are supported: - - >>> list(numeric_range(3, -1, -1.0)) - [3.0, 2.0, 1.0, 0.0] - - Be aware of the limitations of floating point numbers; the representation - of the yielded numbers may be surprising. 
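Where exact endpoints matter, ``decimal.Decimal`` sidesteps that float drift; a minimal sketch (assuming this module is importable as ``more_itertools``):

    from decimal import Decimal
    from more_itertools import numeric_range

    # 0.1 has no exact binary representation; Decimal keeps both the
    # yielded values and the computed length exact.
    assert list(numeric_range(Decimal('0'), Decimal('0.3'), Decimal('0.1'))) == [
        Decimal('0'), Decimal('0.1'), Decimal('0.2')
    ]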
- - ``datetime.datetime`` objects can be used for *start* and *stop*, if *step* - is a ``datetime.timedelta`` object: - - >>> import datetime - >>> start = datetime.datetime(2019, 1, 1) - >>> stop = datetime.datetime(2019, 1, 3) - >>> step = datetime.timedelta(days=1) - >>> items = iter(numeric_range(start, stop, step)) - >>> next(items) - datetime.datetime(2019, 1, 1, 0, 0) - >>> next(items) - datetime.datetime(2019, 1, 2, 0, 0) - - """ - - _EMPTY_HASH = hash(range(0, 0)) - - def __init__(self, *args): - argc = len(args) - if argc == 1: - (self._stop,) = args - self._start = type(self._stop)(0) - self._step = type(self._stop - self._start)(1) - elif argc == 2: - self._start, self._stop = args - self._step = type(self._stop - self._start)(1) - elif argc == 3: - self._start, self._stop, self._step = args - elif argc == 0: - raise TypeError( - 'numeric_range expected at least ' - '1 argument, got {}'.format(argc) - ) - else: - raise TypeError( - 'numeric_range expected at most ' - '3 arguments, got {}'.format(argc) - ) - - self._zero = type(self._step)(0) - if self._step == self._zero: - raise ValueError('numeric_range() arg 3 must not be zero') - self._growing = self._step > self._zero - self._init_len() - - def __bool__(self): - if self._growing: - return self._start < self._stop - else: - return self._start > self._stop - - def __contains__(self, elem): - if self._growing: - if self._start <= elem < self._stop: - return (elem - self._start) % self._step == self._zero - else: - if self._start >= elem > self._stop: - return (self._start - elem) % (-self._step) == self._zero - - return False - - def __eq__(self, other): - if isinstance(other, numeric_range): - empty_self = not bool(self) - empty_other = not bool(other) - if empty_self or empty_other: - return empty_self and empty_other # True if both empty - else: - return ( - self._start == other._start - and self._step == other._step - and self._get_by_index(-1) == other._get_by_index(-1) - ) - else: - return False - - def __getitem__(self, key): - if isinstance(key, int): - return self._get_by_index(key) - elif isinstance(key, slice): - step = self._step if key.step is None else key.step * self._step - - if key.start is None or key.start <= -self._len: - start = self._start - elif key.start >= self._len: - start = self._stop - else: # -self._len < key.start < self._len - start = self._get_by_index(key.start) - - if key.stop is None or key.stop >= self._len: - stop = self._stop - elif key.stop <= -self._len: - stop = self._start - else: # -self._len < key.stop < self._len - stop = self._get_by_index(key.stop) - - return numeric_range(start, stop, step) - else: - raise TypeError( - 'numeric range indices must be ' - 'integers or slices, not {}'.format(type(key).__name__) - ) - - def __hash__(self): - if self: - return hash((self._start, self._get_by_index(-1), self._step)) - else: - return self._EMPTY_HASH - - def __iter__(self): - values = (self._start + (n * self._step) for n in count()) - if self._growing: - return takewhile(partial(gt, self._stop), values) - else: - return takewhile(partial(lt, self._stop), values) - - def __len__(self): - return self._len - - def _init_len(self): - if self._growing: - start = self._start - stop = self._stop - step = self._step - else: - start = self._stop - stop = self._start - step = -self._step - distance = stop - start - if distance <= self._zero: - self._len = 0 - else: # distance > 0 and step > 0: regular euclidean division - q, r = divmod(distance, step) - self._len = int(q) + int(r != 
self._zero) - - def __reduce__(self): - return numeric_range, (self._start, self._stop, self._step) - - def __repr__(self): - if self._step == 1: - return "numeric_range({}, {})".format( - repr(self._start), repr(self._stop) - ) - else: - return "numeric_range({}, {}, {})".format( - repr(self._start), repr(self._stop), repr(self._step) - ) - - def __reversed__(self): - return iter( - numeric_range( - self._get_by_index(-1), self._start - self._step, -self._step - ) - ) - - def count(self, value): - return int(value in self) - - def index(self, value): - if self._growing: - if self._start <= value < self._stop: - q, r = divmod(value - self._start, self._step) - if r == self._zero: - return int(q) - else: - if self._start >= value > self._stop: - q, r = divmod(self._start - value, -self._step) - if r == self._zero: - return int(q) - - raise ValueError("{} is not in numeric range".format(value)) - - def _get_by_index(self, i): - if i < 0: - i += self._len - if i < 0 or i >= self._len: - raise IndexError("numeric range object index out of range") - return self._start + i * self._step - - -def count_cycle(iterable, n=None): - """Cycle through the items from *iterable* up to *n* times, yielding - the number of completed cycles along with each item. If *n* is omitted the - process repeats indefinitely. - - >>> list(count_cycle('AB', 3)) - [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] - - """ - iterable = tuple(iterable) - if not iterable: - return iter(()) - counter = count() if n is None else range(n) - return ((i, item) for i in counter for item in iterable) - - -def mark_ends(iterable): - """Yield 3-tuples of the form ``(is_first, is_last, item)``. - - >>> list(mark_ends('ABC')) - [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] - - Use this when looping over an iterable to take special action on its first - and/or last items: - - >>> iterable = ['Header', 100, 200, 'Footer'] - >>> total = 0 - >>> for is_first, is_last, item in mark_ends(iterable): - ... if is_first: - ... continue # Skip the header - ... if is_last: - ... continue # Skip the footer - ... total += item - >>> print(total) - 300 - """ - it = iter(iterable) - - try: - b = next(it) - except StopIteration: - return - - try: - for i in count(): - a = b - b = next(it) - yield i == 0, False, a - - except StopIteration: - yield i == 0, True, a - - -def locate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(locate([0, 1, 1, 0, 1, 0, 0])) - [1, 2, 4] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item. - - >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) - [1, 3] - - If *window_size* is given, then the *pred* function will be called with - that many items. 
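As an editorial note, the ``_init_len`` computation above is ceiling division, ``ceil((stop - start) / step)``, done with ``divmod`` so it works for any orderable numeric type. A worked check with plain integers:

    q, r = divmod(10 - 0, 3)              # numeric_range(0, 10, 3)
    assert int(q) + int(r != 0) == 4      # items: 0, 3, 6, 9
    assert len(range(0, 10, 3)) == 4      # agrees with the built-in range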
This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(locate(iterable, pred=pred, window_size=3)) - [1, 5, 9] - - Use with :func:`seekable` to find indexes and then retrieve the associated - items: - - >>> from itertools import count - >>> from more_itertools import seekable - >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count()) - >>> it = seekable(source) - >>> pred = lambda x: x > 100 - >>> indexes = locate(it, pred=pred) - >>> i = next(indexes) - >>> it.seek(i) - >>> next(it) - 106 - - """ - if window_size is None: - return compress(count(), map(pred, iterable)) - - if window_size < 1: - raise ValueError('window size must be at least 1') - - it = windowed(iterable, window_size, fillvalue=_marker) - return compress(count(), starmap(pred, it)) - - -def lstrip(iterable, pred): - """Yield the items from *iterable*, but strip any from the beginning - for which *pred* returns ``True``. - - For example, to remove a set of items from the start of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(lstrip(iterable, pred)) - [1, 2, None, 3, False, None] - - This function is analogous to :func:`str.lstrip`, and is essentially - a wrapper for :func:`itertools.dropwhile`. - - """ - return dropwhile(pred, iterable) - - -def rstrip(iterable, pred): - """Yield the items from *iterable*, but strip any from the end - for which *pred* returns ``True``. - - For example, to remove a set of items from the end of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(rstrip(iterable, pred)) - [None, False, None, 1, 2, None, 3] - - This function is analogous to :func:`str.rstrip`. - - """ - cache = [] - cache_append = cache.append - cache_clear = cache.clear - for x in iterable: - if pred(x): - cache_append(x) - else: - yield from cache - cache_clear() - yield x - - -def strip(iterable, pred): - """Yield the items from *iterable*, but strip any from the - beginning and end for which *pred* returns ``True``. - - For example, to remove a set of items from both ends of an iterable: - - >>> iterable = (None, False, None, 1, 2, None, 3, False, None) - >>> pred = lambda x: x in {None, False, ''} - >>> list(strip(iterable, pred)) - [1, 2, None, 3] - - This function is analogous to :func:`str.strip`. - - """ - return rstrip(lstrip(iterable, pred), pred) - - -class islice_extended: - """An extension of :func:`itertools.islice` that supports negative values - for *stop*, *start*, and *step*. - - >>> iterable = iter('abcdefgh') - >>> list(islice_extended(iterable, -4, -1)) - ['e', 'f', 'g'] - - Slices with negative values require some caching of *iterable*, but this - function takes care to minimize the amount of memory required.
- - For example, you can use a negative step with an infinite iterator: - - >>> from itertools import count - >>> list(islice_extended(count(), 110, 99, -2)) - [110, 108, 106, 104, 102, 100] - - You can also use slice notation directly: - - >>> iterable = map(str, count()) - >>> it = islice_extended(iterable)[10:20:2] - >>> list(it) - ['10', '12', '14', '16', '18'] - - """ - - def __init__(self, iterable, *args): - it = iter(iterable) - if args: - self._iterable = _islice_helper(it, slice(*args)) - else: - self._iterable = it - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterable) - - def __getitem__(self, key): - if isinstance(key, slice): - return islice_extended(_islice_helper(self._iterable, key)) - - raise TypeError('islice_extended.__getitem__ argument must be a slice') - - -def _islice_helper(it, s): - start = s.start - stop = s.stop - if s.step == 0: - raise ValueError('step argument must be a non-zero integer or None.') - step = s.step or 1 - - if step > 0: - start = 0 if (start is None) else start - - if start < 0: - # Consume all but the last -start items - cache = deque(enumerate(it, 1), maxlen=-start) - len_iter = cache[-1][0] if cache else 0 - - # Adjust start to be positive - i = max(len_iter + start, 0) - - # Adjust stop to be positive - if stop is None: - j = len_iter - elif stop >= 0: - j = min(stop, len_iter) - else: - j = max(len_iter + stop, 0) - - # Slice the cache - n = j - i - if n <= 0: - return - - for index, item in islice(cache, 0, n, step): - yield item - elif (stop is not None) and (stop < 0): - # Advance to the start position - next(islice(it, start, start), None) - - # When stop is negative, we have to carry -stop items while - # iterating - cache = deque(islice(it, -stop), maxlen=-stop) - - for index, item in enumerate(it): - cached_item = cache.popleft() - if index % step == 0: - yield cached_item - cache.append(item) - else: - # When both start and stop are positive we have the normal case - yield from islice(it, start, stop, step) - else: - start = -1 if (start is None) else start - - if (stop is not None) and (stop < 0): - # Consume all but the last items - n = -stop - 1 - cache = deque(enumerate(it, 1), maxlen=n) - len_iter = cache[-1][0] if cache else 0 - - # If start and stop are both negative they are comparable and - # we can just slice. Otherwise we can adjust start to be negative - # and then slice. - if start < 0: - i, j = start, stop - else: - i, j = min(start - len_iter, -1), None - - for index, item in list(cache)[i:j:step]: - yield item - else: - # Advance to the stop position - if stop is not None: - m = stop + 1 - next(islice(it, m, m), None) - - # stop is positive, so if start is negative they are not comparable - # and we need the rest of the items. - if start < 0: - i = start - n = None - # stop is None and start is positive, so we just need items up to - # the start index. - elif stop is None: - i = None - n = start + 1 - # Both stop and start are positive, so they are comparable. - else: - i = None - n = start - stop - if n <= 0: - return - - cache = list(islice(it, n)) - - yield from cache[i::step] - - -def always_reversible(iterable): - """An extension of :func:`reversed` that supports all iterables, not - just those which implement the ``Reversible`` or ``Sequence`` protocols. - - >>> print(*always_reversible(x for x in range(3))) - 2 1 0 - - If the iterable is already reversible, this function returns the - result of :func:`reversed()`. 
If the iterable is not reversible, - this function will cache the remaining items in the iterable and - yield them in reverse order, which may require significant storage. - """ - try: - return reversed(iterable) - except TypeError: - return reversed(list(iterable)) - - -def consecutive_groups(iterable, ordering=lambda x: x): - """Yield groups of consecutive items using :func:`itertools.groupby`. - The *ordering* function determines whether two items are adjacent by - returning their position. - - By default, the ordering function is the identity function. This is - suitable for finding runs of numbers: - - >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40] - >>> for group in consecutive_groups(iterable): - ... print(list(group)) - [1] - [10, 11, 12] - [20] - [30, 31, 32, 33] - [40] - - For finding runs of adjacent letters, try using the :meth:`index` method - of a string of letters: - - >>> from string import ascii_lowercase - >>> iterable = 'abcdfgilmnop' - >>> ordering = ascii_lowercase.index - >>> for group in consecutive_groups(iterable, ordering): - ... print(list(group)) - ['a', 'b', 'c', 'd'] - ['f', 'g'] - ['i'] - ['l', 'm', 'n', 'o', 'p'] - - Each group of consecutive items is an iterator that shares its source with - *iterable*. When an output group is advanced, the previous group is - no longer available unless its elements are copied (e.g., into a ``list``). - - >>> iterable = [1, 2, 11, 12, 21, 22] - >>> saved_groups = [] - >>> for group in consecutive_groups(iterable): - ... saved_groups.append(list(group)) # Copy group elements - >>> saved_groups - [[1, 2], [11, 12], [21, 22]] - - """ - for k, g in groupby( - enumerate(iterable), key=lambda x: x[0] - ordering(x[1]) - ): - yield map(itemgetter(1), g) - - -def difference(iterable, func=sub, *, initial=None): - """This function is the inverse of :func:`itertools.accumulate`. By default - it will compute the first difference of *iterable* using - :func:`operator.sub`: - - >>> from itertools import accumulate - >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10 - >>> list(difference(iterable)) - [0, 1, 2, 3, 4] - - *func* defaults to :func:`operator.sub`, but other functions can be - specified. They will be applied as follows:: - - A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ... - - For example, to do progressive division: - - >>> iterable = [1, 2, 6, 24, 120] - >>> func = lambda x, y: x // y - >>> list(difference(iterable, func)) - [1, 2, 3, 4, 5] - - If the *initial* keyword is set, the first element will be skipped when - computing successive differences. - - >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10) - >>> list(difference(it, initial=10)) - [1, 2, 3] - - """ - a, b = tee(iterable) - try: - first = [next(b)] - except StopIteration: - return iter([]) - - if initial is not None: - first = [] - - return chain(first, starmap(func, zip(b, a))) - - -class SequenceView(Sequence): - """Return a read-only view of the sequence object *target*. - - :class:`SequenceView` objects are analogous to Python's built-in - "dictionary view" types. They provide a dynamic view of a sequence's items, - meaning that when the sequence updates, so does the view. - - >>> seq = ['0', '1', '2'] - >>> view = SequenceView(seq) - >>> view - SequenceView(['0', '1', '2']) - >>> seq.append('3') - >>> view - SequenceView(['0', '1', '2', '3']) - - Sequence views support indexing, slicing, and length queries.
They act - like the underlying sequence, except they don't allow assignment: - - >>> view[1] - '1' - >>> view[1:-1] - ['1', '2'] - >>> len(view) - 4 - - Sequence views are useful as an alternative to copying, as they don't - require (much) extra storage. - - """ - - def __init__(self, target): - if not isinstance(target, Sequence): - raise TypeError - self._target = target - - def __getitem__(self, index): - return self._target[index] - - def __len__(self): - return len(self._target) - - def __repr__(self): - return '{}({})'.format(self.__class__.__name__, repr(self._target)) - - -class seekable: - """Wrap an iterator to allow for seeking backward and forward. This - progressively caches the items in the source iterable so they can be - re-visited. - - Call :meth:`seek` with an index to seek to that position in the source - iterable. - - To "reset" an iterator, seek to ``0``: - - >>> from itertools import count - >>> it = seekable((str(n) for n in count())) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> it.seek(0) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> next(it) - '3' - - You can also seek forward: - - >>> it = seekable((str(n) for n in range(20))) - >>> it.seek(10) - >>> next(it) - '10' - >>> it.seek(20) # Seeking past the end of the source isn't a problem - >>> list(it) - [] - >>> it.seek(0) # Resetting works even after hitting the end - >>> next(it), next(it), next(it) - ('0', '1', '2') - - Call :meth:`peek` to look ahead one item without advancing the iterator: - - >>> it = seekable('1234') - >>> it.peek() - '1' - >>> list(it) - ['1', '2', '3', '4'] - >>> it.peek(default='empty') - 'empty' - - Before the iterator is at its end, calling :func:`bool` on it will return - ``True``. After it will return ``False``: - - >>> it = seekable('5678') - >>> bool(it) - True - >>> list(it) - ['5', '6', '7', '8'] - >>> bool(it) - False - - You may view the contents of the cache with the :meth:`elements` method. - That returns a :class:`SequenceView`, a view that updates automatically: - - >>> it = seekable((str(n) for n in range(10))) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> elements = it.elements() - >>> elements - SequenceView(['0', '1', '2']) - >>> next(it) - '3' - >>> elements - SequenceView(['0', '1', '2', '3']) - - By default, the cache grows as the source iterable progresses, so beware of - wrapping very large or infinite iterables. Supply *maxlen* to limit the - size of the cache (this of course limits how far back you can seek). 
- - >>> from itertools import count - >>> it = seekable((str(n) for n in count()), maxlen=2) - >>> next(it), next(it), next(it), next(it) - ('0', '1', '2', '3') - >>> list(it.elements()) - ['2', '3'] - >>> it.seek(0) - >>> next(it), next(it), next(it), next(it) - ('2', '3', '4', '5') - >>> next(it) - '6' - - """ - - def __init__(self, iterable, maxlen=None): - self._source = iter(iterable) - if maxlen is None: - self._cache = [] - else: - self._cache = deque([], maxlen) - self._index = None - - def __iter__(self): - return self - - def __next__(self): - if self._index is not None: - try: - item = self._cache[self._index] - except IndexError: - self._index = None - else: - self._index += 1 - return item - - item = next(self._source) - self._cache.append(item) - return item - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - try: - peeked = next(self) - except StopIteration: - if default is _marker: - raise - return default - if self._index is None: - self._index = len(self._cache) - self._index -= 1 - return peeked - - def elements(self): - return SequenceView(self._cache) - - def seek(self, index): - self._index = index - remainder = index - len(self._cache) - if remainder > 0: - consume(self, remainder) - - -class run_length: - """ - :func:`run_length.encode` compresses an iterable with run-length encoding. - It yields groups of repeated items with the count of how many times they - were repeated: - - >>> uncompressed = 'abbcccdddd' - >>> list(run_length.encode(uncompressed)) - [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - - :func:`run_length.decode` decompresses an iterable that was previously - compressed with run-length encoding. It yields the items of the - decompressed iterable: - - >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> list(run_length.decode(compressed)) - ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] - - """ - - @staticmethod - def encode(iterable): - return ((k, ilen(g)) for k, g in groupby(iterable)) - - @staticmethod - def decode(iterable): - return chain.from_iterable(repeat(k, n) for k, n in iterable) - - -def exactly_n(iterable, n, predicate=bool): - """Return ``True`` if exactly ``n`` items in the iterable are ``True`` - according to the *predicate* function. - - >>> exactly_n([True, True, False], 2) - True - >>> exactly_n([True, True, False], 1) - False - >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) - True - - The iterable will be advanced until ``n + 1`` truthy items are encountered, - so avoid calling it on infinite iterables. - - """ - return len(take(n + 1, filter(predicate, iterable))) == n - - -def circular_shifts(iterable): - """Return a list of circular shifts of *iterable*. - - >>> circular_shifts(range(4)) - [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] - """ - lst = list(iterable) - return take(len(lst), windowed(cycle(lst), len(lst))) - - -def make_decorator(wrapping_func, result_index=0): - """Return a decorator version of *wrapping_func*, which is a function that - modifies an iterable. *result_index* is the position in that function's - signature where the iterable goes. - - This lets you use itertools on the "production end," i.e. at function - definition. This can augment what the function returns without changing the - function's code. - - For example, to produce a decorator version of :func:`chunked`: - - >>> from more_itertools import chunked - >>> chunker = make_decorator(chunked, result_index=0) - >>> @chunker(3) - ... 
def iter_range(n): - ... return iter(range(n)) - ... - >>> list(iter_range(9)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - - To only allow truthy items to be returned: - - >>> truth_serum = make_decorator(filter, result_index=1) - >>> @truth_serum(bool) - ... def boolean_test(): - ... return [0, 1, '', ' ', False, True] - ... - >>> list(boolean_test()) - [1, ' ', True] - - The :func:`peekable` and :func:`seekable` wrappers make for practical - decorators: - - >>> from more_itertools import peekable - >>> peekable_function = make_decorator(peekable) - >>> @peekable_function() - ... def str_range(*args): - ... return (str(x) for x in range(*args)) - ... - >>> it = str_range(1, 20, 2) - >>> next(it), next(it), next(it) - ('1', '3', '5') - >>> it.peek() - '7' - >>> next(it) - '7' - - """ - # See https://sites.google.com/site/bbayles/index/decorator_factory for - # notes on how this works. - def decorator(*wrapping_args, **wrapping_kwargs): - def outer_wrapper(f): - def inner_wrapper(*args, **kwargs): - result = f(*args, **kwargs) - wrapping_args_ = list(wrapping_args) - wrapping_args_.insert(result_index, result) - return wrapping_func(*wrapping_args_, **wrapping_kwargs) - - return inner_wrapper - - return outer_wrapper - - return decorator - - -def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): - """Return a dictionary that maps the items in *iterable* to categories - defined by *keyfunc*, transforms them with *valuefunc*, and - then summarizes them by category with *reducefunc*. - - *valuefunc* defaults to the identity function if it is unspecified. - If *reducefunc* is unspecified, no summarization takes place: - - >>> keyfunc = lambda x: x.upper() - >>> result = map_reduce('abbccc', keyfunc) - >>> sorted(result.items()) - [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] - - Specifying *valuefunc* transforms the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> result = map_reduce('abbccc', keyfunc, valuefunc) - >>> sorted(result.items()) - [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] - - Specifying *reducefunc* summarizes the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> reducefunc = sum - >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) - >>> sorted(result.items()) - [('A', 1), ('B', 2), ('C', 3)] - - You may want to filter the input iterable before applying the map/reduce - procedure: - - >>> all_items = range(30) - >>> items = [x for x in all_items if 10 <= x <= 20] # Filter - >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 - >>> categories = map_reduce(items, keyfunc=keyfunc) - >>> sorted(categories.items()) - [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] - >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) - >>> sorted(summaries.items()) - [(0, 90), (1, 75)] - - Note that all items in the iterable are gathered into a list before the - summarization step, which may require significant storage. - - The returned object is a :obj:`collections.defaultdict` with the - ``default_factory`` set to ``None``, such that it behaves like a normal - dictionary. 
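A quick check of that last point (an added sketch, not an original doctest; assumes this module is importable as ``more_itertools``):

    from more_itertools import map_reduce

    result = map_reduce('abbccc', lambda x: x.upper())
    assert result.default_factory is None   # reset before returning
    try:
        result['Z']                         # missing keys now raise KeyError
    except KeyError:
        pass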
- - """ - valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc - - ret = defaultdict(list) - for item in iterable: - key = keyfunc(item) - value = valuefunc(item) - ret[key].append(value) - - if reducefunc is not None: - for key, value_list in ret.items(): - ret[key] = reducefunc(value_list) - - ret.default_factory = None - return ret - - -def rlocate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``, starting from the right and moving left. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4 - [4, 2, 1] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item: - - >>> iterable = iter('abcb') - >>> pred = lambda x: x == 'b' - >>> list(rlocate(iterable, pred)) - [3, 1] - - If *window_size* is given, then the *pred* function will be called with - that many items. This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(rlocate(iterable, pred=pred, window_size=3)) - [9, 5, 1] - - Beware, this function won't return anything for infinite iterables. - If *iterable* is reversible, ``rlocate`` will reverse it and search from - the right. Otherwise, it will search from the left and return the results - in reverse order. - - See :func:`locate` for other example applications. - - """ - if window_size is None: - try: - len_iter = len(iterable) - return (len_iter - i - 1 for i in locate(reversed(iterable), pred)) - except TypeError: - pass - - return reversed(list(locate(iterable, pred, window_size))) - - -def replace(iterable, pred, substitutes, count=None, window_size=1): - """Yield the items from *iterable*, replacing the items for which *pred* - returns ``True`` with the items from the iterable *substitutes*. - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1] - >>> pred = lambda x: x == 0 - >>> substitutes = (2, 3) - >>> list(replace(iterable, pred, substitutes)) - [1, 1, 2, 3, 1, 1, 2, 3, 1, 1] - - If *count* is given, the number of replacements will be limited: - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0] - >>> pred = lambda x: x == 0 - >>> substitutes = [None] - >>> list(replace(iterable, pred, substitutes, count=2)) - [1, 1, None, 1, 1, None, 1, 1, 0] - - Use *window_size* to control the number of items passed as arguments to - *pred*. This allows for locating and replacing subsequences. - - >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] - >>> window_size = 3 - >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred - >>> substitutes = [3, 4] # Splice in these items - >>> list(replace(iterable, pred, substitutes, window_size=window_size)) - [3, 4, 5, 3, 4, 5] - - """ - if window_size < 1: - raise ValueError('window_size must be at least 1') - - # Save the substitutes iterable, since it's used more than once - substitutes = tuple(substitutes) - - # Add padding such that the number of windows matches the length of the - # iterable - it = chain(iterable, [_marker] * (window_size - 1)) - windows = windowed(it, window_size) - - n = 0 - for w in windows: - # If the current window matches our predicate (and we haven't hit - # our maximum number of replacements), splice in the substitutes - # and then consume the following windows that overlap with this one. - # For example, if the iterable is (0, 1, 2, 3, 4...) - # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) - if pred(*w): - if (count is None) or (n < count): - n += 1 - yield from substitutes - consume(windows, window_size - 1) - continue - - # If there was no match (or we've reached the replacement limit), - # yield the first item from the window. - if w and (w[0] is not _marker): - yield w[0] - - -def partitions(iterable): - """Yield all possible order-preserving partitions of *iterable*. - - >>> iterable = 'abc' - >>> for part in partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['a', 'b', 'c'] - - This is unrelated to :func:`partition`. - - """ - sequence = list(iterable) - n = len(sequence) - for i in powerset(range(1, n)): - yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] - - -def set_partitions(iterable, k=None): - """ - Yield the set partitions of *iterable* into *k* parts. Set partitions are - not order-preserving. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable, 2): - ... print([''.join(p) for p in part]) - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - - - If *k* is not given, every set partition is generated. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - ['a', 'b', 'c'] - - """ - L = list(iterable) - n = len(L) - if k is not None: - if k < 1: - raise ValueError( - "Can't partition in a negative or zero number of groups" - ) - elif k > n: - return - - def set_partitions_helper(L, k): - n = len(L) - if k == 1: - yield [L] - elif n == k: - yield [[s] for s in L] - else: - e, *M = L - for p in set_partitions_helper(M, k - 1): - yield [[e], *p] - for p in set_partitions_helper(M, k): - for i in range(len(p)): - yield p[:i] + [[e] + p[i]] + p[i + 1 :] - - if k is None: - for k in range(1, n + 1): - yield from set_partitions_helper(L, k) - else: - yield from set_partitions_helper(L, k) - - -class time_limited: - """ - Yield items from *iterable* until *limit_seconds* have passed. - If the time limit expires before all items have been yielded, the - ``timed_out`` parameter will be set to ``True``. - - >>> from time import sleep - >>> def generator(): - ... yield 1 - ... yield 2 - ... sleep(0.2) - ... yield 3 - >>> iterable = time_limited(0.1, generator()) - >>> list(iterable) - [1, 2] - >>> iterable.timed_out - True - - Note that the time is checked before each item is yielded, and iteration - stops if the time elapsed is greater than *limit_seconds*. If your time - limit is 1 second, but it takes 2 seconds to generate the first item from - the iterable, the function will run for 2 seconds and not yield anything. - - """ - - def __init__(self, limit_seconds, iterable): - if limit_seconds < 0: - raise ValueError('limit_seconds must be positive') - self.limit_seconds = limit_seconds - self._iterable = iter(iterable) - self._start_time = monotonic() - self.timed_out = False - - def __iter__(self): - return self - - def __next__(self): - item = next(self._iterable) - if monotonic() - self._start_time > self.limit_seconds: - self.timed_out = True - raise StopIteration - - return item - - -def only(iterable, default=None, too_long=None): - """If *iterable* has only one item, return it. - If it has zero items, return *default*. - If it has more than one item, raise the exception given by *too_long*, - which is ``ValueError`` by default. 
- - >>> only([], default='missing') - 'missing' - >>> only([1]) - 1 - >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 1, 2, - and perhaps more.' - >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError - - Note that :func:`only` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check - iterable contents less destructively. - """ - it = iter(iterable) - first_value = next(it, default) - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def ichunked(iterable, n): - """Break *iterable* into sub-iterables with *n* elements each. - :func:`ichunked` is like :func:`chunked`, but it yields iterables - instead of lists. - - If the sub-iterables are read in order, the elements of *iterable* - won't be stored in memory. - If they are read out of order, :func:`itertools.tee` is used to cache - elements as necessary. - - >>> from itertools import count - >>> all_chunks = ichunked(count(), 4) - >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks) - >>> list(c_2) # c_1's elements have been cached; c_3's haven't been - [4, 5, 6, 7] - >>> list(c_1) - [0, 1, 2, 3] - >>> list(c_3) - [8, 9, 10, 11] - - """ - source = iter(iterable) - - while True: - # Check to see whether we're at the end of the source iterable - item = next(source, _marker) - if item is _marker: - return - - # Clone the source and yield an n-length slice - source, it = tee(chain([item], source)) - yield islice(it, n) - - # Advance the source iterable - consume(source, n) - - -def distinct_combinations(iterable, r): - """Yield the distinct combinations of *r* items taken from *iterable*. - - >>> list(distinct_combinations([0, 0, 1], 2)) - [(0, 0), (0, 1)] - - Equivalent to ``set(combinations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - """ - if r < 0: - raise ValueError('r must be non-negative') - elif r == 0: - yield () - return - pool = tuple(iterable) - generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] - current_combo = [None] * r - level = 0 - while generators: - try: - cur_idx, p = next(generators[-1]) - except StopIteration: - generators.pop() - level -= 1 - continue - current_combo[level] = p - if level + 1 == r: - yield tuple(current_combo) - else: - generators.append( - unique_everseen( - enumerate(pool[cur_idx + 1 :], cur_idx + 1), - key=itemgetter(1), - ) - ) - level += 1 - - -def filter_except(validator, iterable, *exceptions): - """Yield the items from *iterable* for which the *validator* function does - not raise one of the specified *exceptions*. - - *validator* is called for each item in *iterable*. - It should be a function that accepts one argument and raises an exception - if that item is not valid. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(filter_except(int, iterable, ValueError, TypeError)) - ['1', '2', '4'] - - If an exception other than one given by *exceptions* is raised by - *validator*, it is raised like normal. 
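To make the last point concrete (an added sketch, not an original doctest; assumes this module is importable as ``more_itertools``): exceptions outside the given list propagate unchanged.

    from more_itertools import filter_except

    try:
        list(filter_except(int, ['1', None], ValueError))
    except TypeError:
        pass  # int(None) raises TypeError, which is not in *exceptions*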
- """ - for item in iterable: - try: - validator(item) - except exceptions: - pass - else: - yield item - - -def map_except(function, iterable, *exceptions): - """Transform each item from *iterable* with *function* and yield the - result, unless *function* raises one of the specified *exceptions*. - - *function* is called to transform each item in *iterable*. - It should accept one argument. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(map_except(int, iterable, ValueError, TypeError)) - [1, 2, 4] - - If an exception other than one given by *exceptions* is raised by - *function*, it is raised like normal. - """ - for item in iterable: - try: - yield function(item) - except exceptions: - pass - - -def map_if(iterable, pred, func, func_else=lambda x: x): - """Evaluate each item from *iterable* using *pred*. If the result is - equivalent to ``True``, transform the item with *func* and yield it. - Otherwise, transform the item with *func_else* and yield it. - - *pred*, *func*, and *func_else* should each be functions that accept - one argument. By default, *func_else* is the identity function. - - >>> from math import sqrt - >>> iterable = list(range(-5, 5)) - >>> iterable - [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] - >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig')) - [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig'] - >>> list(map_if(iterable, lambda x: x >= 0, - ... lambda x: f'{sqrt(x):.2f}', lambda x: None)) - [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00'] - """ - for item in iterable: - yield func(item) if pred(item) else func_else(item) - - -def _sample_unweighted(iterable, k): - # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: - # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". - - # Fill up the reservoir (collection of samples) with the first `k` samples - reservoir = take(k, iterable) - - # Generate random number that's the largest in a sample of k U(0,1) numbers - # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic - W = exp(log(random()) / k) - - # The number of elements to skip before changing the reservoir is a random - # number with a geometric distribution. Sample it using random() and logs. - next_index = k + floor(log(random()) / log(1 - W)) - - for index, element in enumerate(iterable, k): - - if index == next_index: - reservoir[randrange(k)] = element - # The new W is the largest in a sample of k U(0, `old_W`) numbers - W *= exp(log(random()) / k) - next_index += floor(log(random()) / log(1 - W)) + 1 - - return reservoir - - -def _sample_weighted(iterable, k, weights): - # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. : - # "Weighted random sampling with a reservoir". - - # Log-transform for numerical stability for weights that are small/large - weight_keys = (log(random()) / weight for weight in weights) - - # Fill up the reservoir (collection of samples) with the first `k` - # weight-keys and elements, then heapify the list. - reservoir = take(k, zip(weight_keys, iterable)) - heapify(reservoir) - - # The number of jumps before changing the reservoir is a random variable - # with an exponential distribution. Sample it using random() and logs. - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - - for weight, element in zip(weights, iterable): - if weight >= weights_to_skip: - # The notation here is consistent with the paper, but we store - # the weight-keys in log-space for better numerical stability. 
- smallest_weight_key, _ = reservoir[0] - t_w = exp(weight * smallest_weight_key) - r_2 = uniform(t_w, 1) # generate U(t_w, 1) - weight_key = log(r_2) / weight - heapreplace(reservoir, (weight_key, element)) - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - else: - weights_to_skip -= weight - - # Equivalent to [element for weight_key, element in sorted(reservoir)] - return [heappop(reservoir)[1] for _ in range(k)] - - -def sample(iterable, k, weights=None): - """Return a *k*-length list of elements chosen (without replacement) - from the *iterable*. Like :func:`random.sample`, but works on iterables - of unknown length. - - >>> iterable = range(100) - >>> sample(iterable, 5) # doctest: +SKIP - [81, 60, 96, 16, 4] - - An iterable with *weights* may also be given: - - >>> iterable = range(100) - >>> weights = (i * i + 1 for i in range(100)) - >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP - [79, 67, 74, 66, 78] - - The algorithm can also be used to generate weighted random permutations. - The relative weight of each item determines the probability that it - appears late in the permutation. - - >>> data = "abcdefgh" - >>> weights = range(1, len(data) + 1) - >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP - ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] - """ - if k == 0: - return [] - - iterable = iter(iterable) - if weights is None: - return _sample_unweighted(iterable, k) - else: - weights = iter(weights) - return _sample_weighted(iterable, k, weights) - - -def is_sorted(iterable, key=None, reverse=False, strict=False): - """Returns ``True`` if the items of iterable are in sorted order, and - ``False`` otherwise. *key* and *reverse* have the same meaning that they do - in the built-in :func:`sorted` function. - - >>> is_sorted(['1', '2', '3', '4', '5'], key=int) - True - >>> is_sorted([5, 4, 3, 1, 2], reverse=True) - False - - If *strict*, tests for strict sorting, that is, returns ``False`` if equal - elements are found: - - >>> is_sorted([1, 2, 2]) - True - >>> is_sorted([1, 2, 2], strict=True) - False - - The function returns ``False`` after encountering the first out-of-order - item. If there are no out-of-order items, the iterable is exhausted. - """ - - compare = (le if reverse else ge) if strict else (lt if reverse else gt) - it = iterable if key is None else map(key, iterable) - return not any(starmap(compare, pairwise(it))) - - -class AbortThread(BaseException): - pass - - -class callback_iter: - """Convert a function that uses callbacks to an iterator. - - Let *func* be a function that takes a `callback` keyword argument. - For example: - - >>> def func(callback=None): - ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]: - ... if callback: - ... callback(i, c) - ... return 4 - - - Use ``with callback_iter(func)`` to get an iterator over the parameters - that are delivered to the callback. - - >>> with callback_iter(func) as it: - ... for args, kwargs in it: - ... print(args) - (1, 'a') - (2, 'b') - (3, 'c') - - The function will be called in a background thread. The ``done`` property - indicates whether it has completed execution. - - >>> it.done - True - - If it completes successfully, its return value will be available - in the ``result`` property. - - >>> it.result - 4 - - Notes: - - * If the function uses some keyword argument besides ``callback``, supply - *callback_kwd*. - * If it finished executing, but raised an exception, accessing the - ``result`` property will raise the same exception. 
- * If it hasn't finished executing, accessing the ``result`` - property from within the ``with`` block will raise ``RuntimeError``. - * If it hasn't finished executing, accessing the ``result`` property from - outside the ``with`` block will raise a - ``more_itertools.AbortThread`` exception. - * Provide *wait_seconds* to adjust how frequently the iterator is polled for - output. - - """ - - def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): - self._func = func - self._callback_kwd = callback_kwd - self._aborted = False - self._future = None - self._wait_seconds = wait_seconds - self._executor = ThreadPoolExecutor(max_workers=1) - self._iterator = self._reader() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self._aborted = True - self._executor.shutdown() - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterator) - - @property - def done(self): - if self._future is None: - return False - return self._future.done() - - @property - def result(self): - if not self.done: - raise RuntimeError('Function has not yet completed') - - return self._future.result() - - def _reader(self): - q = Queue() - - def callback(*args, **kwargs): - if self._aborted: - raise AbortThread('canceled by user') - - q.put((args, kwargs)) - - self._future = self._executor.submit( - self._func, **{self._callback_kwd: callback} - ) - - while True: - try: - item = q.get(timeout=self._wait_seconds) - except Empty: - pass - else: - q.task_done() - yield item - - if self._future.done(): - break - - remaining = [] - while True: - try: - item = q.get_nowait() - except Empty: - break - else: - q.task_done() - remaining.append(item) - q.join() - yield from remaining - - -def windowed_complete(iterable, n): - """ - Yield ``(beginning, middle, end)`` tuples, where: - - * Each ``middle`` has *n* items from *iterable* - * Each ``beginning`` has the items before the ones in ``middle`` - * Each ``end`` has the items after the ones in ``middle`` - - >>> iterable = range(7) - >>> n = 3 - >>> for beginning, middle, end in windowed_complete(iterable, n): - ... print(beginning, middle, end) - () (0, 1, 2) (3, 4, 5, 6) - (0,) (1, 2, 3) (4, 5, 6) - (0, 1) (2, 3, 4) (5, 6) - (0, 1, 2) (3, 4, 5) (6,) - (0, 1, 2, 3) (4, 5, 6) () - - Note that *n* must be at least 0 and at most equal to the length of - *iterable*. - - This function will exhaust the iterable and may require significant - storage. - """ - if n < 0: - raise ValueError('n must be >= 0') - - seq = tuple(iterable) - size = len(seq) - - if n > size: - raise ValueError('n must be <= len(seq)') - - for i in range(size - n + 1): - beginning = seq[:i] - middle = seq[i : i + n] - end = seq[i + n :] - yield beginning, middle, end - - -def all_unique(iterable, key=None): - """ - Returns ``True`` if all the elements of *iterable* are unique (no two - elements are equal). - - >>> all_unique('ABCB') - False - - If a *key* function is specified, it will be used to make comparisons. - - >>> all_unique('ABCb') - True - >>> all_unique('ABCb', str.lower) - False - - The function returns as soon as the first non-unique element is - encountered. Iterables with a mix of hashable and unhashable items can - be used, but the function will be slower for unhashable items.
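An added illustration of the mixed hashable/unhashable handling implemented below (assuming this module is importable as ``more_itertools``):

    from more_itertools import all_unique

    assert all_unique([1, [2], 3]) is True
    assert all_unique([1, [2], 3, [2]]) is False  # [2] takes the slower list path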
- """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - for element in map(key, iterable) if key else iterable: - try: - if element in seenset: - return False - seenset_add(element) - except TypeError: - if element in seenlist: - return False - seenlist_add(element) - return True - - -def nth_product(index, *args): - """Equivalent to ``list(product(*args))[index]``. - - The products of *args* can be ordered lexicographically. - :func:`nth_product` computes the product at sort position *index* without - computing the previous products. - - >>> nth_product(8, range(2), range(2), range(2), range(2)) - (1, 0, 0, 0) - - ``IndexError`` will be raised if the given *index* is invalid. - """ - pools = list(map(tuple, reversed(args))) - ns = list(map(len, pools)) - - c = reduce(mul, ns) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - result = [] - for pool, n in zip(pools, ns): - result.append(pool[index % n]) - index //= n - - return tuple(reversed(result)) - - -def nth_permutation(iterable, r, index): - """Equivalent to ``list(permutations(iterable, r))[index]`` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`nth_permutation` - computes the subsequence at sort position *index* directly, without - computing the previous subsequences. - - >>> nth_permutation('ghijk', 2, 5) - ('h', 'i') - - ``ValueError`` will be raised if *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. - """ - pool = list(iterable) - n = len(pool) - - if r is None or r == n: - r, c = n, factorial(n) - elif not 0 <= r < n: - raise ValueError - else: - c = factorial(n) // factorial(n - r) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - if c == 0: - return tuple() - - result = [0] * r - q = index * factorial(n) // c if r < n else index - for d in range(1, n + 1): - q, i = divmod(q, d) - if 0 <= n - d < r: - result[n - d] = i - if q == 0: - break - - return tuple(map(pool.pop, result)) - - -def value_chain(*args): - """Yield all arguments passed to the function in the same order in which - they were passed. If an argument itself is iterable then iterate over its - values. - - >>> list(value_chain(1, 2, 3, [4, 5, 6])) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and are emitted - as-is: - - >>> list(value_chain('12', '34', ['56', '78'])) - ['12', '34', '56', '78'] - - - Multiple levels of nesting are not flattened. - - """ - for value in args: - if isinstance(value, (str, bytes)): - yield value - continue - try: - yield from value - except TypeError: - yield value - - -def product_index(element, *args): - """Equivalent to ``list(product(*args)).index(element)`` - - The products of *args* can be ordered lexicographically. - :func:`product_index` computes the first index of *element* without - computing the previous products. - - >>> product_index([8, 2], range(10), range(5)) - 42 - - ``ValueError`` will be raised if the given *element* isn't in the product - of *args*.
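The arithmetic implemented below is mixed-radix positional notation, with one digit per pool; a worked instance of the docstring example above (an editorial sketch in plain integer math):

    # pools range(10) and range(5) have sizes (10, 5); element [8, 2] is
    # digit 8 in base 10 followed by digit 2 in base 5:
    index = 0
    for digit, size in [(8, 10), (2, 5)]:
        index = index * size + digit
    assert index == 42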
- """ - index = 0 - - for x, pool in zip_longest(element, args, fillvalue=_marker): - if x is _marker or pool is _marker: - raise ValueError('element is not a product of args') - - pool = tuple(pool) - index = index * len(pool) + pool.index(x) - - return index - - -def combination_index(element, iterable): - """Equivalent to ``list(combinations(iterable, r)).index(element)`` - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`combination_index` computes the index of the - first *element*, without computing the previous combinations. - - >>> combination_index('adf', 'abcdefg') - 10 - - ``ValueError`` will be raised if the given *element* isn't one of the - combinations of *iterable*. - """ - element = enumerate(element) - k, y = next(element, (None, None)) - if k is None: - return 0 - - indexes = [] - pool = enumerate(iterable) - for n, x in pool: - if x == y: - indexes.append(n) - tmp, y = next(element, (None, None)) - if tmp is None: - break - else: - k = tmp - else: - raise ValueError('element is not a combination of iterable') - - n, _ = last(pool, default=(n, None)) - - # Python versiosn below 3.8 don't have math.comb - index = 1 - for i, j in enumerate(reversed(indexes), start=1): - j = n - j - if i <= j: - index += factorial(j) // (factorial(i) * factorial(j - i)) - - return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index - - -def permutation_index(element, iterable): - """Equivalent to ``list(permutations(iterable, r)).index(element)``` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`permutation_index` - computes the index of the first *element* directly, without computing - the previous permutations. - - >>> permutation_index([1, 3, 2], range(5)) - 19 - - ``ValueError`` will be raised if the given *element* isn't one of the - permutations of *iterable*. - """ - index = 0 - pool = list(iterable) - for i, x in zip(range(len(pool), -1, -1), element): - r = pool.index(x) - index = index * i + r - del pool[r] - - return index - - -class countable: - """Wrap *iterable* and keep a count of how many items have been consumed. - - The ``items_seen`` attribute starts at ``0`` and increments as the iterable - is consumed: - - >>> iterable = map(str, range(10)) - >>> it = countable(iterable) - >>> it.items_seen - 0 - >>> next(it), next(it) - ('0', '1') - >>> list(it) - ['2', '3', '4', '5', '6', '7', '8', '9'] - >>> it.items_seen - 10 - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self.items_seen = 0 - - def __iter__(self): - return self - - def __next__(self): - item = next(self._it) - self.items_seen += 1 - - return item - - -def chunked_even(iterable, n): - """Break *iterable* into lists of approximately length *n*. - Items are distributed such the lengths of the lists differ by at most - 1 item. 
-
- >>> iterable = [1, 2, 3, 4, 5, 6, 7]
- >>> n = 3
- >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2
- [[1, 2, 3], [4, 5], [6, 7]]
- >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1
- [[1, 2, 3], [4, 5, 6], [7]]
-
- """
-
- len_method = getattr(iterable, '__len__', None)
-
- if len_method is None:
- return _chunked_even_online(iterable, n)
- else:
- return _chunked_even_finite(iterable, len_method(), n)
-
-
-def _chunked_even_online(iterable, n):
- buffer = []
- maxbuf = n + (n - 2) * (n - 1)
- for x in iterable:
- buffer.append(x)
- if len(buffer) == maxbuf:
- yield buffer[:n]
- buffer = buffer[n:]
- yield from _chunked_even_finite(buffer, len(buffer), n)
-
-
-def _chunked_even_finite(iterable, N, n):
- if N < 1:
- return
-
- # Lists are either size `full_size <= n` or `partial_size = full_size - 1`
- q, r = divmod(N, n)
- num_lists = q + (1 if r > 0 else 0)
- q, r = divmod(N, num_lists)
- full_size = q + (1 if r > 0 else 0)
- partial_size = full_size - 1
- num_full = N - partial_size * num_lists
- num_partial = num_lists - num_full
-
- buffer = []
- iterator = iter(iterable)
-
- # Yield num_full lists of full_size
- for x in iterator:
- buffer.append(x)
- if len(buffer) == full_size:
- yield buffer
- buffer = []
- num_full -= 1
- if num_full <= 0:
- break
-
- # Yield num_partial lists of partial_size
- for x in iterator:
- buffer.append(x)
- if len(buffer) == partial_size:
- yield buffer
- buffer = []
- num_partial -= 1
-
-
-def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
- """A version of :func:`zip` that "broadcasts" any scalar
- (i.e., non-iterable) items into output tuples.
-
- >>> iterable_1 = [1, 2, 3]
- >>> iterable_2 = ['a', 'b', 'c']
- >>> scalar = '_'
- >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
- [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
-
- The *scalar_types* keyword argument determines what types are considered
- scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
- treat strings and byte strings as iterable:
-
- >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
- [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
-
- If the *strict* keyword argument is ``True``, then
- ``UnequalIterablesError`` will be raised if any of the iterables have
- different lengths.
- """
-
- def is_scalar(obj):
- if scalar_types and isinstance(obj, scalar_types):
- return True
- try:
- iter(obj)
- except TypeError:
- return True
- else:
- return False
-
- size = len(objects)
- if not size:
- return
-
- iterables, iterable_positions = [], []
- scalars, scalar_positions = [], []
- for i, obj in enumerate(objects):
- if is_scalar(obj):
- scalars.append(obj)
- scalar_positions.append(i)
- else:
- iterables.append(iter(obj))
- iterable_positions.append(i)
-
- if len(scalars) == size:
- yield tuple(objects)
- return
-
- zipper = _zip_equal if strict else zip
- for item in zipper(*iterables):
- new_item = [None] * size
-
- for i, elem in zip(iterable_positions, item):
- new_item[i] = elem
-
- for i, elem in zip(scalar_positions, scalars):
- new_item[i] = elem
-
- yield tuple(new_item)
-
-
-def unique_in_window(iterable, n, key=None):
- """Yield the items from *iterable* that haven't been seen recently.
- *n* is the size of the lookback window.
-
- >>> iterable = [0, 1, 0, 2, 3, 0]
- >>> n = 3
- >>> list(unique_in_window(iterable, n))
- [0, 1, 2, 3, 0]
-
- The *key* function, if provided, will be used to determine uniqueness:
-
- >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
- ['a', 'b', 'c', 'd', 'a']
-
- The items in *iterable* must be hashable.
-
- """
- if n <= 0:
- raise ValueError('n must be greater than 0')
-
- window = deque(maxlen=n)
- uniques = set()
- use_key = key is not None
-
- for item in iterable:
- k = key(item) if use_key else item
- if k in uniques:
- continue
-
- if len(uniques) == n:
- uniques.discard(window[0])
-
- uniques.add(k)
- window.append(k)
-
- yield item
-
-
-def duplicates_everseen(iterable, key=None):
- """Yield duplicate elements after their first appearance.
-
- >>> list(duplicates_everseen('mississippi'))
- ['s', 'i', 's', 's', 'i', 'p', 'i']
- >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
- ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
-
- This function is analogous to :func:`unique_everseen` and is subject to
- the same performance considerations.
-
- """
- seen_set = set()
- seen_list = []
- use_key = key is not None
-
- for element in iterable:
- k = key(element) if use_key else element
- try:
- if k not in seen_set:
- seen_set.add(k)
- else:
- yield element
- except TypeError:
- if k not in seen_list:
- seen_list.append(k)
- else:
- yield element
-
-
-def duplicates_justseen(iterable, key=None):
- """Yields serially-duplicate elements after their first appearance.
-
- >>> list(duplicates_justseen('mississippi'))
- ['s', 's', 'p']
- >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
- ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
-
- This function is analogous to :func:`unique_justseen`.
-
- """
- return flatten(
- map(
- lambda group_tuple: islice_extended(group_tuple[1])[1:],
- groupby(iterable, key),
- )
- )
-
-
-def minmax(iterable_or_value, *others, key=None, default=_marker):
- """Returns both the smallest and largest items in an iterable
- or the largest of two or more arguments.
-
- >>> minmax([3, 1, 5])
- (1, 5)
-
- >>> minmax(4, 2, 6)
- (2, 6)
-
- If a *key* function is provided, it will be used to transform the input
- items for comparison.
-
- >>> minmax([5, 30], key=str) # '30' sorts before '5'
- (30, 5)
-
- If a *default* value is provided, it will be returned if there are no
- input items.
-
- >>> minmax([], default=(0, 0))
- (0, 0)
-
- Otherwise ``ValueError`` is raised.
-
- This function is based on a recipe by Raymond Hettinger and takes care
- to minimize the number of comparisons performed.
- """
- iterable = (iterable_or_value, *others) if others else iterable_or_value
-
- it = iter(iterable)
-
- try:
- lo = hi = next(it)
- except StopIteration as e:
- if default is _marker:
- raise ValueError(
- '`minmax()` argument is an empty iterable. '
- 'Provide a `default` value to suppress this error.'
- ) from e
- return default
-
- # Different branches depending on the presence of key. This saves a lot
- # of unimportant copies which would otherwise slow the "key=None" branch
- # down significantly.
- if key is None:
- for x, y in zip_longest(it, it, fillvalue=lo):
- if y < x:
- x, y = y, x
- if x < lo:
- lo = x
- if hi < y:
- hi = y
-
- else:
- lo_key = hi_key = key(lo)
-
- for x, y in zip_longest(it, it, fillvalue=lo):
-
- x_key, y_key = key(x), key(y)
-
- if y_key < x_key:
- x, y, x_key, y_key = y, x, y_key, x_key
- if x_key < lo_key:
- lo, lo_key = x, x_key
- if hi_key < y_key:
- hi, hi_key = y, y_key
-
- return lo, hi
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/recipes.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/recipes.py
deleted file mode 100755
index a259642..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/more_itertools/recipes.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""Imported from the recipes section of the itertools documentation.
-
-All functions taken from the recipes section of the itertools library docs
-[1]_.
-Some backward-compatible usability improvements have been made.
-
-.. [1] http://docs.python.org/library/itertools.html#recipes
-
-"""
-import warnings
-from collections import deque
-from itertools import (
- chain,
- combinations,
- count,
- cycle,
- groupby,
- islice,
- repeat,
- starmap,
- tee,
- zip_longest,
-)
-import operator
-from random import randrange, sample, choice
-
-__all__ = [
- 'all_equal',
- 'before_and_after',
- 'consume',
- 'convolve',
- 'dotproduct',
- 'first_true',
- 'flatten',
- 'grouper',
- 'iter_except',
- 'ncycles',
- 'nth',
- 'nth_combination',
- 'padnone',
- 'pad_none',
- 'pairwise',
- 'partition',
- 'powerset',
- 'prepend',
- 'quantify',
- 'random_combination_with_replacement',
- 'random_combination',
- 'random_permutation',
- 'random_product',
- 'repeatfunc',
- 'roundrobin',
- 'sliding_window',
- 'tabulate',
- 'tail',
- 'take',
- 'triplewise',
- 'unique_everseen',
- 'unique_justseen',
-]
-
-
-def take(n, iterable):
- """Return first *n* items of the iterable as a list.
-
- >>> take(3, range(10))
- [0, 1, 2]
-
- If there are fewer than *n* items in the iterable, all of them are
- returned.
-
- >>> take(10, range(3))
- [0, 1, 2]
-
- """
- return list(islice(iterable, n))
-
-
-def tabulate(function, start=0):
- """Return an iterator over the results of ``func(start)``,
- ``func(start + 1)``, ``func(start + 2)``...
-
- *func* should be a function that accepts one integer argument.
-
- If *start* is not specified it defaults to 0. It will be incremented each
- time the iterator is advanced.
-
- >>> square = lambda x: x ** 2
- >>> iterator = tabulate(square, -3)
- >>> take(4, iterator)
- [9, 4, 1, 0]
-
- """
- return map(function, count(start))
-
-
-def tail(n, iterable):
- """Return an iterator over the last *n* items of *iterable*.
-
- >>> t = tail(3, 'ABCDEFG')
- >>> list(t)
- ['E', 'F', 'G']
-
- """
- return iter(deque(iterable, maxlen=n))
-
-
-def consume(iterator, n=None):
- """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
- entirely.
-
- Efficiently exhausts an iterator without returning values. Defaults to
- consuming the whole iterator, but an optional second argument may be
- provided to limit consumption.
-
- >>> i = (x for x in range(10))
- >>> next(i)
- 0
- >>> consume(i, 3)
- >>> next(i)
- 4
- >>> consume(i)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- If the iterator has fewer items remaining than the provided limit, the
- whole iterator will be consumed.
-
- >>> i = (x for x in range(3))
- >>> consume(i, 5)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- """
- # Use functions that consume iterators at C speed.
- if n is None:
- # feed the entire iterator into a zero-length deque
- deque(iterator, maxlen=0)
- else:
- # advance to the empty slice starting at position n
- next(islice(iterator, n, n), None)
-
-
-def nth(iterable, n, default=None):
- """Returns the nth item or a default value.
-
- >>> l = range(10)
- >>> nth(l, 3)
- 3
- >>> nth(l, 20, "zebra")
- 'zebra'
-
- """
- return next(islice(iterable, n, None), default)
-
-
-def all_equal(iterable):
- """
- Returns ``True`` if all the elements are equal to each other.
-
- >>> all_equal('aaaa')
- True
- >>> all_equal('aaab')
- False
-
- """
- g = groupby(iterable)
- return next(g, True) and not next(g, False)
-
-
-def quantify(iterable, pred=bool):
- """Return how many times the predicate is true.
-
- >>> quantify([True, False, True])
- 2
-
- """
- return sum(map(pred, iterable))
-
-
-def pad_none(iterable):
- """Returns the sequence of elements and then returns ``None`` indefinitely.
-
- >>> take(5, pad_none(range(3)))
- [0, 1, 2, None, None]
-
- Useful for emulating the behavior of the built-in :func:`map` function.
-
- See also :func:`padded`.
-
- """
- return chain(iterable, repeat(None))
-
-
-padnone = pad_none
-
-
-def ncycles(iterable, n):
- """Returns the sequence elements *n* times
-
- >>> list(ncycles(["a", "b"], 3))
- ['a', 'b', 'a', 'b', 'a', 'b']
-
- """
- return chain.from_iterable(repeat(tuple(iterable), n))
-
-
-def dotproduct(vec1, vec2):
- """Returns the dot product of the two iterables.
-
- >>> dotproduct([10, 10], [20, 20])
- 400
-
- """
- return sum(map(operator.mul, vec1, vec2))
-
-
-def flatten(listOfLists):
- """Return an iterator flattening one level of nesting in a list of lists.
-
- >>> list(flatten([[0, 1], [2, 3]]))
- [0, 1, 2, 3]
-
- See also :func:`collapse`, which can flatten multiple levels of nesting.
-
- """
- return chain.from_iterable(listOfLists)
-
-
-def repeatfunc(func, times=None, *args):
- """Call *func* with *args* repeatedly, returning an iterable over the
- results.
-
- If *times* is specified, the iterable will terminate after that many
- repetitions:
-
- >>> from operator import add
- >>> times = 4
- >>> args = 3, 5
- >>> list(repeatfunc(add, times, *args))
- [8, 8, 8, 8]
-
- If *times* is ``None`` the iterable will not terminate:
-
- >>> from random import randrange
- >>> times = None
- >>> args = 1, 11
- >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
- [2, 4, 8, 1, 8, 4]
-
- """
- if times is None:
- return starmap(func, repeat(args))
- return starmap(func, repeat(args, times))
-
-
-def _pairwise(iterable):
- """Returns an iterator of paired items, overlapping, from the original
-
- >>> take(4, pairwise(count()))
- [(0, 1), (1, 2), (2, 3), (3, 4)]
-
- On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
-
- """
- a, b = tee(iterable)
- next(b, None)
- yield from zip(a, b)
-
-
-try:
- from itertools import pairwise as itertools_pairwise
-except ImportError:
- pairwise = _pairwise
-else:
-
- def pairwise(iterable):
- yield from itertools_pairwise(iterable)
-
- pairwise.__doc__ = _pairwise.__doc__
-
-
-def grouper(iterable, n, fillvalue=None):
- """Collect data into fixed-length chunks or blocks.
-
- >>> list(grouper('ABCDEFG', 3, 'x'))
- [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
-
- """
- if isinstance(iterable, int):
- warnings.warn(
- "grouper expects iterable as first parameter", DeprecationWarning
- )
- n, iterable = iterable, n
- args = [iter(iterable)] * n
- return zip_longest(fillvalue=fillvalue, *args)
-
-
-def roundrobin(*iterables):
- """Yields an item from each iterable, alternating between them.
-
- >>> list(roundrobin('ABC', 'D', 'EF'))
- ['A', 'D', 'E', 'B', 'F', 'C']
-
- This function produces the same output as :func:`interleave_longest`, but
- may perform better for some inputs (in particular when the number of
- iterables is small).
-
- """
- # Recipe credited to George Sakkis
- pending = len(iterables)
- nexts = cycle(iter(it).__next__ for it in iterables)
- while pending:
- try:
- for next in nexts:
- yield next()
- except StopIteration:
- pending -= 1
- nexts = cycle(islice(nexts, pending))
-
-
-def partition(pred, iterable):
- """
- Returns a 2-tuple of iterables derived from the input iterable.
- The first yields the items that have ``pred(item) == False``.
- The second yields the items that have ``pred(item) == True``.
-
- >>> is_odd = lambda x: x % 2 != 0
- >>> iterable = range(10)
- >>> even_items, odd_items = partition(is_odd, iterable)
- >>> list(even_items), list(odd_items)
- ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
-
- If *pred* is None, :func:`bool` is used.
-
- >>> iterable = [0, 1, False, True, '', ' ']
- >>> false_items, true_items = partition(None, iterable)
- >>> list(false_items), list(true_items)
- ([0, False, ''], [1, True, ' '])
-
- """
- if pred is None:
- pred = bool
-
- evaluations = ((pred(x), x) for x in iterable)
- t1, t2 = tee(evaluations)
- return (
- (x for (cond, x) in t1 if not cond),
- (x for (cond, x) in t2 if cond),
- )
-
-
-def powerset(iterable):
- """Yields all possible subsets of the iterable.
-
- >>> list(powerset([1, 2, 3]))
- [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
-
- :func:`powerset` will operate on iterables that aren't :class:`set`
- instances, so repeated elements in the input will produce repeated elements
- in the output. Use :func:`unique_everseen` on the input to avoid generating
- duplicates:
-
- >>> seq = [1, 1, 0]
- >>> list(powerset(seq))
- [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
- >>> from more_itertools import unique_everseen
- >>> list(powerset(unique_everseen(seq)))
- [(), (1,), (0,), (1, 0)]
-
- """
- s = list(iterable)
- return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
-
-
-def unique_everseen(iterable, key=None):
- """
- Yield unique elements, preserving order.
-
- >>> list(unique_everseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D']
- >>> list(unique_everseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'D']
-
- Sequences with a mix of hashable and unhashable items can be used.
- The function will be slower (i.e., `O(n^2)`) for unhashable items.
-
- Remember that ``list`` objects are unhashable - you can use the *key*
- parameter to transform the list to a tuple (which is hashable) to
- avoid a slowdown.
-
- >>> iterable = ([1, 2], [2, 3], [1, 2])
- >>> list(unique_everseen(iterable)) # Slow
- [[1, 2], [2, 3]]
- >>> list(unique_everseen(iterable, key=tuple)) # Faster
- [[1, 2], [2, 3]]
-
- Similarly, you may want to convert unhashable ``set`` objects with
- ``key=frozenset``. For ``dict`` objects,
- ``key=lambda x: frozenset(x.items())`` can be used.
-
- """
- seenset = set()
- seenset_add = seenset.add
- seenlist = []
- seenlist_add = seenlist.append
- use_key = key is not None
-
- for element in iterable:
- k = key(element) if use_key else element
- try:
- if k not in seenset:
- seenset_add(k)
- yield element
- except TypeError:
- if k not in seenlist:
- seenlist_add(k)
- yield element
-
-
-def unique_justseen(iterable, key=None):
- """Yields elements in order, ignoring serial duplicates
-
- >>> list(unique_justseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D', 'A', 'B']
- >>> list(unique_justseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'A', 'D']
-
- """
- return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
-
-
-def iter_except(func, exception, first=None):
- """Yields results from a function repeatedly until an exception is raised.
-
- Converts a call-until-exception interface to an iterator interface.
- Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
- to end the loop.
-
- >>> l = [0, 1, 2]
- >>> list(iter_except(l.pop, IndexError))
- [2, 1, 0]
-
- Multiple exceptions can be specified as a stopping condition:
-
- >>> l = [1, 2, 3, '...', 4, 5, 6]
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
- [7, 6, 5]
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
- [4, 3, 2]
- >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
- []
-
- """
- try:
- if first is not None:
- yield first()
- while 1:
- yield func()
- except exception:
- pass
-
-
-def first_true(iterable, default=None, pred=None):
- """
- Returns the first true value in the iterable.
-
- If no true value is found, returns *default*
-
- If *pred* is not None, returns the first item for which
- ``pred(item) == True``.
-
- >>> first_true(range(10))
- 1
- >>> first_true(range(10), pred=lambda x: x > 5)
- 6
- >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
- 'missing'
-
- """
- return next(filter(pred, iterable), default)
-
-
-def random_product(*args, repeat=1):
- """Draw an item at random from each of the input iterables.
-
- >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
- ('c', 3, 'Z')
-
- If *repeat* is provided as a keyword argument, that many items will be
- drawn from each iterable.
-
- >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
- ('a', 2, 'd', 3)
-
- This is equivalent to taking a random selection from
- ``itertools.product(*args, **kwargs)``.
-
- """
- pools = [tuple(pool) for pool in args] * repeat
- return tuple(choice(pool) for pool in pools)
-
-
-def random_permutation(iterable, r=None):
- """Return a random *r* length permutation of the elements in *iterable*.
-
- If *r* is not specified or is ``None``, then *r* defaults to the length of
- *iterable*.
-
- >>> random_permutation(range(5)) # doctest:+SKIP
- (3, 4, 0, 1, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.permutations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- r = len(pool) if r is None else r
- return tuple(sample(pool, r))
-
-
-def random_combination(iterable, r):
- """Return a random *r* length subsequence of the elements in *iterable*.
-
- >>> random_combination(range(5), 3) # doctest:+SKIP
- (2, 3, 4)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(sample(range(n), r))
- return tuple(pool[i] for i in indices)
-
-
-def random_combination_with_replacement(iterable, r):
- """Return a random *r* length subsequence of elements in *iterable*,
- allowing individual elements to be repeated.
-
- >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
- (0, 0, 1, 2, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations_with_replacement(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(randrange(n) for i in range(r))
- return tuple(pool[i] for i in indices)
-
-
-def nth_combination(iterable, r, index):
- """Equivalent to ``list(combinations(iterable, r))[index]``.
-
- The subsequences of *iterable* that are of length *r* can be ordered
- lexicographically. :func:`nth_combination` computes the subsequence at
- sort position *index* directly, without computing the previous
- subsequences.
-
- >>> nth_combination(range(5), 3, 5)
- (0, 3, 4)
-
- ``ValueError`` will be raised if *r* is negative or greater than the length
- of *iterable*.
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pool = tuple(iterable)
- n = len(pool)
- if (r < 0) or (r > n):
- raise ValueError
-
- c = 1
- k = min(r, n - r)
- for i in range(1, k + 1):
- c = c * (n - k + i) // i
-
- if index < 0:
- index += c
-
- if (index < 0) or (index >= c):
- raise IndexError
-
- result = []
- while r:
- c, n, r = c * r // n, n - 1, r - 1
- while index >= c:
- index -= c
- c, n = c * (n - r) // n, n - 1
- result.append(pool[-1 - n])
-
- return tuple(result)
-
-
-def prepend(value, iterator):
- """Yield *value*, followed by the elements in *iterator*.
-
- >>> value = '0'
- >>> iterator = ['1', '2', '3']
- >>> list(prepend(value, iterator))
- ['0', '1', '2', '3']
-
- To prepend multiple values, see :func:`itertools.chain`
- or :func:`value_chain`.
-
- """
- return chain([value], iterator)
-
-
-def convolve(signal, kernel):
- """Convolve the iterable *signal* with the iterable *kernel*.
-
- >>> signal = (1, 2, 3, 4, 5)
- >>> kernel = [3, 2, 1]
- >>> list(convolve(signal, kernel))
- [3, 8, 14, 20, 26, 14, 5]
-
- Note: the input arguments are not interchangeable, as the *kernel*
- is immediately consumed and stored.
-
- """
- kernel = tuple(kernel)[::-1]
- n = len(kernel)
- window = deque([0], maxlen=n) * n
- for x in chain(signal, repeat(0, n - 1)):
- window.append(x)
- yield sum(map(operator.mul, kernel, window))
-
-
-def before_and_after(predicate, it):
- """A variant of :func:`takewhile` that allows complete access to the
- remainder of the iterator.
-
- >>> it = iter('ABCdEfGhI')
- >>> all_upper, remainder = before_and_after(str.isupper, it)
- >>> ''.join(all_upper)
- 'ABC'
- >>> ''.join(remainder) # takewhile() would lose the 'd'
- 'dEfGhI'
-
- Note that the first iterator must be fully consumed before the second
- iterator can generate valid results.
- """
- it = iter(it)
- transition = []
-
- def true_iterator():
- for elem in it:
- if predicate(elem):
- yield elem
- else:
- transition.append(elem)
- return
-
- def remainder_iterator():
- yield from transition
- yield from it
-
- return true_iterator(), remainder_iterator()
-
-
-def triplewise(iterable):
- """Return overlapping triplets from *iterable*.
- - >>> list(triplewise('ABCDE')) - [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] - - """ - for (a, _), (b, c) in pairwise(pairwise(iterable)): - yield a, b, c - - -def sliding_window(iterable, n): - """Return a sliding window of width *n* over *iterable*. - - >>> list(sliding_window(range(6), 4)) - [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)] - - If *iterable* has fewer than *n* items, then nothing is yielded: - - >>> list(sliding_window(range(3), 4)) - [] - - For a variant with more features, see :func:`windowed`. - """ - it = iter(iterable) - window = deque(islice(it, n), maxlen=n) - if len(window) == n: - yield tuple(window) - for x in it: - window.append(x) - yield tuple(window) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__about__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__about__.py deleted file mode 100755 index c359122..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__about__.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "21.2" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__init__.py deleted file mode 100755 index 3c50c5d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -from .__about__ import ( - __author__, - __copyright__, - __email__, - __license__, - __summary__, - __title__, - __uri__, - __version__, -) - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_manylinux.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_manylinux.py deleted file mode 100755 index 4c379aa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_manylinux.py +++ /dev/null @@ -1,301 +0,0 @@ -import collections -import functools -import os -import re -import struct -import sys -import warnings -from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. 
This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: Tuple[int, ...] = struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> Optional[_ELFFileHeader]: - try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header - - -def _is_linux_armhf() -> bool: - # hard-float ABI can be detected from the ELF header of the running - # process - # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result - - -def _have_compatible_abi(arch: str) -> bool: - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} - - -# If glibc ever changes its major version, we need to know what the last -# minor version was, so we can build the complete list of all versions. 
-# For now, guess what the highest minor version might be, assume it will -# be 50 for testing. Once this actually happens, update the dictionary -# with the actual value. -_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) - - -class _GLibCVersion(NamedTuple): - major: int - minor: int - - -def _glibc_version_string_confstr() -> Optional[str]: - """ - Primary implementation of glibc_version_string using os.confstr. - """ - # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely - # to be broken or missing. This strategy is used in the standard library - # platform module. - # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 - try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr("CS_GNU_LIBC_VERSION") - assert version_string is not None - _, version = version_string.split() - except (AssertionError, AttributeError, OSError, ValueError): - # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... - return None - return version - - -def _glibc_version_string_ctypes() -> Optional[str]: - """ - Fallback implementation of glibc_version_string using ctypes. - """ - try: - import ctypes - except ImportError: - return None - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. - # - # We must also handle the special case where the executable is not a - # dynamically linked executable. This can occur when using musl libc, - # for example. In this situation, dlopen() will error, leading to an - # OSError. Interestingly, at least in the case of musl, there is no - # errno set on the OSError. The single string argument used to construct - # OSError comes from libc itself and is therefore not portable to - # hard code here. In any case, failure to call dlopen() means we - # can proceed, so we bail on our attempt. - try: - process_namespace = ctypes.CDLL(None) - except OSError: - return None - - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str: str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -def _glibc_version_string() -> Optional[str]: - """Returns glibc version string, or None if not using glibc.""" - return _glibc_version_string_confstr() or _glibc_version_string_ctypes() - - -def _parse_glibc_version(version_str: str) -> Tuple[int, int]: - """Parse glibc version. - - We use a regexp instead of str.split because we want to discard any - random junk that might come after the minor version -- this might happen - in patched/forked versions of glibc (e.g. Linaro's version of glibc - uses version strings like "2.20-2014.11"). See gh-3588. 
- """ - m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) - if not m: - warnings.warn( - "Expected glibc version with 2 components major.minor," - " got: %s" % version_str, - RuntimeWarning, - ) - return -1, -1 - return int(m.group("major")), int(m.group("minor")) - - -@functools.lru_cache() -def _get_glibc_version() -> Tuple[int, int]: - version_str = _glibc_version_string() - if version_str is None: - return (-1, -1) - return _parse_glibc_version(version_str) - - -# From PEP 513, PEP 600 -def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: - sys_glibc = _get_glibc_version() - if sys_glibc < version: - return False - # Check for presence of _manylinux module. - try: - import _manylinux # noqa - except ImportError: - return True - if hasattr(_manylinux, "manylinux_compatible"): - result = _manylinux.manylinux_compatible(version[0], version[1], arch) - if result is not None: - return bool(result) - return True - if version == _GLibCVersion(2, 5): - if hasattr(_manylinux, "manylinux1_compatible"): - return bool(_manylinux.manylinux1_compatible) - if version == _GLibCVersion(2, 12): - if hasattr(_manylinux, "manylinux2010_compatible"): - return bool(_manylinux.manylinux2010_compatible) - if version == _GLibCVersion(2, 17): - if hasattr(_manylinux, "manylinux2014_compatible"): - return bool(_manylinux.manylinux2014_compatible) - return True - - -_LEGACY_MANYLINUX_MAP = { - # CentOS 7 w/ glibc 2.17 (PEP 599) - (2, 17): "manylinux2014", - # CentOS 6 w/ glibc 2.12 (PEP 571) - (2, 12): "manylinux2010", - # CentOS 5 w/ glibc 2.5 (PEP 513) - (2, 5): "manylinux1", -} - - -def platform_tags(linux: str, arch: str) -> Iterator[str]: - if not _have_compatible_abi(arch): - return - # Oldest glibc to be supported regardless of architecture is (2, 17). - too_old_glibc2 = _GLibCVersion(2, 16) - if arch in {"x86_64", "i686"}: - # On x86/i686 also oldest glibc to be supported is (2, 5). - too_old_glibc2 = _GLibCVersion(2, 4) - current_glibc = _GLibCVersion(*_get_glibc_version()) - glibc_max_list = [current_glibc] - # We can assume compatibility across glibc major versions. - # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 - # - # Build a list of maximum glibc versions so that we can - # output the canonical list of all glibc from current_glibc - # down to too_old_glibc2, including all intermediary versions. - for glibc_major in range(current_glibc.major - 1, 1, -1): - glibc_minor = _LAST_GLIBC_MINOR[glibc_major] - glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. 
- if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_musllinux.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_musllinux.py deleted file mode 100755 index 85450fa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_musllinux.py +++ /dev/null @@ -1,136 +0,0 @@ -"""PEP 656 support. - -This module implements logic to detect if the currently running Python is -linked against musl, and what musl version is used. -""" - -import contextlib -import functools -import operator -import os -import re -import struct -import subprocess -import sys -from typing import IO, Iterator, NamedTuple, Optional, Tuple - - -def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: - return struct.unpack(fmt, f.read(struct.calcsize(fmt))) - - -def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: - """Detect musl libc location by parsing the Python executable. - - Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca - ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html - """ - f.seek(0) - try: - ident = _read_unpacked(f, "16B") - except struct.error: - return None - if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. - return None - f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. - - try: - # e_fmt: Format for program header. - # p_fmt: Format for section header. - # p_idx: Indexes to find p_type, p_offset, and p_filesz. - e_fmt, p_fmt, p_idx = { - 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. - 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. - }[ident[4]] - except KeyError: - return None - else: - p_get = operator.itemgetter(*p_idx) - - # Find the interpreter section and return its content. - try: - _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) - except struct.error: - return None - for i in range(e_phnum + 1): - f.seek(e_phoff + e_phentsize * i) - try: - p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) - except struct.error: - return None - if p_type != 3: # Not PT_INTERP. - continue - f.seek(p_offset) - interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") - if "musl" not in interpreter: - return None - return interpreter - return None - - -class _MuslVersion(NamedTuple): - major: int - minor: int - - -def _parse_musl_version(output: str) -> Optional[_MuslVersion]: - lines = [n for n in (n.strip() for n in output.splitlines()) if n] - if len(lines) < 2 or lines[0][:4] != "musl": - return None - m = re.match(r"Version (\d+)\.(\d+)", lines[1]) - if not m: - return None - return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) - - -@functools.lru_cache() -def _get_musl_version(executable: str) -> Optional[_MuslVersion]: - """Detect currently-running musl runtime version. - - This is done by checking the specified executable's dynamic linking - information, and invoking the loader to parse its output for a version - string. 
If the loader is musl, the output would be something like:: - - musl libc (x86_64) - Version 1.2.2 - Dynamic Program Loader - """ - with contextlib.ExitStack() as stack: - try: - f = stack.enter_context(open(executable, "rb")) - except IOError: - return None - ld = _parse_ld_musl_from_elf(f) - if not ld: - return None - proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) - return _parse_musl_version(proc.stderr) - - -def platform_tags(arch: str) -> Iterator[str]: - """Generate musllinux tags compatible to the current platform. - - :param arch: Should be the part of platform tag after the ``linux_`` - prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a - prerequisite for the current platform to be musllinux-compatible. - - :returns: An iterator of compatible musllinux tags. - """ - sys_musl = _get_musl_version(sys.executable) - if sys_musl is None: # Python not dynamically linked against musl. - return - for minor in range(sys_musl.minor, -1, -1): - yield f"musllinux_{sys_musl.major}_{minor}_{arch}" - - -if __name__ == "__main__": # pragma: no cover - import sysconfig - - plat = sysconfig.get_platform() - assert plat.startswith("linux-"), "not linux" - - print("plat:", plat) - print("musl:", _get_musl_version(sys.executable)) - print("tags:", end=" ") - for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): - print(t, end="\n ") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_structures.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_structures.py deleted file mode 100755 index 9515497..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/_structures.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
- - -class InfinityType: - def __repr__(self) -> str: - return "Infinity" - - def __hash__(self) -> int: - return hash(repr(self)) - - def __lt__(self, other: object) -> bool: - return False - - def __le__(self, other: object) -> bool: - return False - - def __eq__(self, other: object) -> bool: - return isinstance(other, self.__class__) - - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - - def __gt__(self, other: object) -> bool: - return True - - def __ge__(self, other: object) -> bool: - return True - - def __neg__(self: object) -> "NegativeInfinityType": - return NegativeInfinity - - -Infinity = InfinityType() - - -class NegativeInfinityType: - def __repr__(self) -> str: - return "-Infinity" - - def __hash__(self) -> int: - return hash(repr(self)) - - def __lt__(self, other: object) -> bool: - return True - - def __le__(self, other: object) -> bool: - return True - - def __eq__(self, other: object) -> bool: - return isinstance(other, self.__class__) - - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - - def __gt__(self, other: object) -> bool: - return False - - def __ge__(self, other: object) -> bool: - return False - - def __neg__(self: object) -> InfinityType: - return Infinity - - -NegativeInfinity = NegativeInfinityType() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/markers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/markers.py deleted file mode 100755 index 18769b0..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/markers.py +++ /dev/null @@ -1,304 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import operator -import os -import platform -import sys -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -from pkg_resources.extern.pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, -) - -from .specifiers import InvalidSpecifier, Specifier - -__all__ = [ - "InvalidMarker", - "UndefinedComparison", - "UndefinedEnvironmentName", - "Marker", - "default_environment", -] - -Operator = Callable[[str, str], bool] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. 
- """ - - -class Node: - def __init__(self, value: Any) -> None: - self.value = value - - def __str__(self) -> str: - return str(self.value) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker( - marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True -) -> str: - - assert isinstance(marker, (list, tuple, str)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. 
- if (
- isinstance(marker, list)
- and len(marker) == 1
- and isinstance(marker[0], (list, tuple))
- ):
- return _format_marker(marker[0])
-
- if isinstance(marker, list):
- inner = (_format_marker(m, first=False) for m in marker)
- if first:
- return " ".join(inner)
- else:
- return "(" + " ".join(inner) + ")"
- elif isinstance(marker, tuple):
- return " ".join([m.serialize() for m in marker])
- else:
- return marker
-
-
-_operators: Dict[str, Operator] = {
- "in": lambda lhs, rhs: lhs in rhs,
- "not in": lambda lhs, rhs: lhs not in rhs,
- "<": operator.lt,
- "<=": operator.le,
- "==": operator.eq,
- "!=": operator.ne,
- ">=": operator.ge,
- ">": operator.gt,
-}
-
-
-def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
- try:
- spec = Specifier("".join([op.serialize(), rhs]))
- except InvalidSpecifier:
- pass
- else:
- return spec.contains(lhs)
-
- oper: Optional[Operator] = _operators.get(op.serialize())
- if oper is None:
- raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
-
- return oper(lhs, rhs)
-
-
-class Undefined:
- pass
-
-
-_undefined = Undefined()
-
-
-def _get_env(environment: Dict[str, str], name: str) -> str:
- value: Union[str, Undefined] = environment.get(name, _undefined)
-
- if isinstance(value, Undefined):
- raise UndefinedEnvironmentName(
- f"{name!r} does not exist in evaluation environment."
- )
-
- return value
-
-
-def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
- groups: List[List[bool]] = [[]]
-
- for marker in markers:
- assert isinstance(marker, (list, tuple, str))
-
- if isinstance(marker, list):
- groups[-1].append(_evaluate_markers(marker, environment))
- elif isinstance(marker, tuple):
- lhs, op, rhs = marker
-
- if isinstance(lhs, Variable):
- lhs_value = _get_env(environment, lhs.value)
- rhs_value = rhs.value
- else:
- lhs_value = lhs.value
- rhs_value = _get_env(environment, rhs.value)
-
- groups[-1].append(_eval_op(lhs_value, op, rhs_value))
- else:
- assert marker in ["and", "or"]
- if marker == "or":
- groups.append([])
-
- return any(all(item) for item in groups)
-
-
-def format_full_version(info: "sys._version_info") -> str:
- version = "{0.major}.{0.minor}.{0.micro}".format(info)
- kind = info.releaselevel
- if kind != "final":
- version += kind[0] + str(info.serial)
- return version
-
-
-def default_environment() -> Dict[str, str]:
- iver = format_full_version(sys.implementation.version)
- implementation_name = sys.implementation.name
- return {
- "implementation_name": implementation_name,
- "implementation_version": iver,
- "os_name": os.name,
- "platform_machine": platform.machine(),
- "platform_release": platform.release(),
- "platform_system": platform.system(),
- "platform_version": platform.version(),
- "python_full_version": platform.python_version(),
- "platform_python_implementation": platform.python_implementation(),
- "python_version": ".".join(platform.python_version_tuple()[:2]),
- "sys_platform": sys.platform,
- }
-
-
-class Marker:
- def __init__(self, marker: str) -> None:
- try:
- self._markers = _coerce_parse_result(MARKER.parseString(marker))
- except ParseException as e:
- raise InvalidMarker(
- f"Invalid marker: {marker!r}, parse error at "
- f"{marker[e.loc : e.loc + 8]!r}"
- )
-
- def __str__(self) -> str:
- return _format_marker(self._markers)
-
- def __repr__(self) -> str:
- return f"<Marker('{self}')>"
-
- def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
- """Evaluate a marker.
-
- Return the boolean from evaluating the given marker against the
- environment.
environment is an optional argument to override all or - part of the determined environment. - - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/requirements.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/requirements.py deleted file mode 100755 index 6af14ec..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/requirements.py +++ /dev/null @@ -1,146 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -import string -import urllib.parse -from typing import List, Optional as TOptional, Set - -from pkg_resources.extern.pyparsing import ( # noqa - Combine, - Literal as L, - Optional, - ParseException, - Regex, - Word, - ZeroOrMore, - originalTextFor, - stringEnd, - stringStart, -) - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. - """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - -class Requirement: - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. Raises InvalidRequirement on a badly-formed requirement - string. 
- """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? Do we need to test against the _name_ of - # the thing as well as the version? What about the markers? - # TODO: Can we normalize the name and extra name? - - def __init__(self, requirement_string: str) -> None: - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' - ) - - self.name: str = req.name - if req.url: - parsed_url = urllib.parse.urlparse(req.url) - if parsed_url.scheme == "file": - if urllib.parse.urlunparse(parsed_url) != req.url: - raise InvalidRequirement("Invalid URL given") - elif not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc - ): - raise InvalidRequirement(f"Invalid URL: {req.url}") - self.url: TOptional[str] = req.url - else: - self.url = None - self.extras: Set[str] = set(req.extras.asList() if req.extras else []) - self.specifier: SpecifierSet = SpecifierSet(req.specifier) - self.marker: TOptional[Marker] = req.marker if req.marker else None - - def __str__(self) -> str: - parts: List[str] = [self.name] - - if self.extras: - formatted_extras = ",".join(sorted(self.extras)) - parts.append(f"[{formatted_extras}]") - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append(f"@ {self.url}") - if self.marker: - parts.append(" ") - - if self.marker: - parts.append(f"; {self.marker}") - - return "".join(parts) - - def __repr__(self) -> str: - return f"" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/specifiers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/specifiers.py deleted file mode 100755 index ce66bd4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/specifiers.py +++ /dev/null @@ -1,828 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import abc -import functools -import itertools -import re -import warnings -from typing import ( - Callable, - Dict, - Iterable, - Iterator, - List, - Optional, - Pattern, - Set, - Tuple, - TypeVar, - Union, -) - -from .utils import canonicalize_version -from .version import LegacyVersion, Version, parse - -ParsedVersion = Union[Version, LegacyVersion] -UnparsedVersion = Union[Version, LegacyVersion, str] -VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) -CallableOperator = Callable[[ParsedVersion, str], bool] - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(metaclass=abc.ABCMeta): - @abc.abstractmethod - def __str__(self) -> str: - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self) -> int: - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. 
- """ - - @abc.abstractmethod - def __ne__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ - - @abc.abstractproperty - def prereleases(self) -> Optional[bool]: - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @prereleases.setter - def prereleases(self, value: bool) -> None: - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @abc.abstractmethod - def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. - """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators: Dict[str, str] = {} - _regex: Pattern[str] - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier(f"Invalid specifier: '{spec}'") - - self._spec: Tuple[str, str] = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) - - def __str__(self) -> str: - return "{}{}".format(*self._spec) - - @property - def _canonical_spec(self) -> Tuple[str, str]: - return self._spec[0], canonicalize_version(self._spec[1]) - - def __hash__(self) -> int: - return hash(self._canonical_spec) - - def __eq__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._canonical_spec == other._canonical_spec - - def __ne__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - - def _get_operator(self, op: str) -> CallableOperator: - operator_callable: CallableOperator = getattr( - self, f"_compare_{self._operators[op]}" - ) - return operator_callable - - def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self) -> str: - return self._spec[0] - - @property - def version(self) -> str: - return self._spec[1] - - @property - def prereleases(self) -> Optional[bool]: - return self._prereleases - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: str) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Determine if prereleases are to be allowed or not. 
- if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - normalized_item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if normalized_item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - operator_callable: CallableOperator = self._get_operator(self.operator) - return operator_callable(normalized_item, self.version) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later in case nothing - # else matches this specifier. - if parsed_version.is_prerelease and not ( - prereleases or self.prereleases - ): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the beginning. - else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = r""" - (?P(==|!=|<=|>=|<|>)) - \s* - (?P - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. 
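The contains()/filter() semantics above can be exercised directly; a sketch assuming the standalone packaging distribution:

from packaging.specifiers import Specifier

spec = Specifier(">=1.0")
print("1.3" in spec)                                  # True: __contains__ delegates to contains()
print(spec.contains("1.3.dev1"))                      # False: pre-releases are excluded by default
print(spec.contains("1.3.dev1", prereleases=True))    # True once explicitly allowed
print(list(spec.filter(["0.5", "1.0", "2.0.dev1"])))  # ['1.0']: final release found, dev release held back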
- ) - """ - - _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - super().__init__(spec, prereleases) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal( - self, prospective: LegacyVersion, spec: str - ) -> bool: - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: - return prospective > self._coerce_version(spec) - - -def _require_version_compare( - fn: Callable[["Specifier", ParsedVersion, str], bool] -) -> Callable[["Specifier", ParsedVersion, str], bool]: - @functools.wraps(fn) - def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: - if not isinstance(prospective, Version): - return False - return fn(self, prospective, spec) - - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = r""" - (?P(~=|==|!=|<=|>=|<|>|===)) - (?P - (?: - # The identity operators allow for an escape hatch that will - # do an exact string match of the version you wish to install. - # This will not be parsed by PEP 440 and we cannot determine - # any semantic meaning from it. This operator is discouraged - # but included entirely as an escape hatch. - (?<====) # Only match for the identity operator - \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. - ) - | - (?: - # The (non)equality operators allow for wild card and local - # versions to be specified so we have to define these two - # operators separately to enable that. - (?<===|!=) # Only match for equals and not equals - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. - (?: - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* - )? - ) - | - (?: - # The compatible operator requires at least two digits in the - # release segment. - (?<=~=) # Only match for the compatible operator - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? 
-                (?:                                    # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?           # dev release
-            )
-            |
-            (?:
-                # All other operators only allow a sub set of what the
-                # (non)equality operators do. Specifically they do not allow
-                # local versions to be specified nor do they allow the prefix
-                # matching wild cards.
-                (?<!==|!=|~=)  # We have special cases for these
-                               # operators so we want to make sure they
-                               # don't match here.
-
-                \s*
-                v?
-                (?:[0-9]+!)?                           # epoch
-                [0-9]+(?:\.[0-9]+)*                    # release
-                (?:                                    # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                    # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?           # dev release
-            )
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "~=": "compatible",
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
-        ">=": "greater_than_equal",
-        "<": "less_than",
-        ">": "greater_than",
-        "===": "arbitrary",
-    }
-
-    @_require_version_compare
-    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # Compatible releases have an equivalent combination of >= and ==. That
-        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
-        # implement this in terms of the other specifiers instead of
-        # implementing it ourselves. The only thing we need to do is construct
-        # the other specifiers.
-
-        # We want everything but the last item in the version, but we want to
-        # ignore suffix segments.
-        prefix = ".".join(
-            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
-        )
-
-        # Add the prefix notation to the end of our string
-        prefix += ".*"
-
-        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
-            prospective, prefix
-        )
-
-    @_require_version_compare
-    def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # We need special logic to handle prefix matching
-        if spec.endswith(".*"):
-            # In the case of prefix matching we want to ignore local segment.
-            prospective = Version(prospective.public)
-            # Split the spec out by dots, and pretend that there is an implicit
-            # dot in between a release segment and a pre-release segment.
-            split_spec = _version_split(spec[:-2])  # Remove the trailing .*
-
-            # Split the prospective version out by dots, and pretend that there
-            # is an implicit dot in between a release segment and a pre-release
-            # segment.
-            split_prospective = _version_split(str(prospective))
-
-            # Shorten the prospective version to be the same length as the spec
-            # so that we can determine if the specifier is a prefix of the
-            # prospective version or not.
-            shortened_prospective = split_prospective[: len(split_spec)]
-
-            # Pad out our two sides with zeros so that they both equal the same
-            # length.
-            padded_spec, padded_prospective = _pad_version(
-                split_spec, shortened_prospective
-            )
-
-            return padded_prospective == padded_spec
-        else:
-            # Convert our spec string into a Version
-            spec_version = Version(spec)
-
-            # If the specifier does not have a local segment, then we want to
-            # act as if the prospective version also does not have a local
-            # segment.
-            if not spec_version.local:
-                prospective = Version(prospective.public)
-
-            return prospective == spec_version
-
-    @_require_version_compare
-    def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-        return not self._compare_equal(prospective, spec)
-
-    @_require_version_compare
-    def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # NB: Local version identifiers are NOT permitted in the version
-        # specifier, so local version labels can be universally removed from
-        # the prospective version.
-        return Version(prospective.public) <= Version(spec)
-
-    @_require_version_compare
-    def _compare_greater_than_equal(
-        self, prospective: ParsedVersion, spec: str
-    ) -> bool:
-
-        # NB: Local version identifiers are NOT permitted in the version
-        # specifier, so local version labels can be universally removed from
-        # the prospective version.
- return Version(prospective.public) >= Version(spec) - - @_require_version_compare - def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is less than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective < spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a pre-release version, that we do not accept pre-release - # versions for the version mentioned in the specifier (e.g. <3.1 should - # not match 3.1.dev0, but should match 3.0.dev0). - if not spec.is_prerelease and prospective.is_prerelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # less than the spec version *and* it's not a pre-release of the same - # version in the spec. - return True - - @_require_version_compare - def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is greater than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective > spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a post-release version, that we do not accept - # post-release versions for the version mentioned in the specifier - # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). - if not spec.is_postrelease and prospective.is_postrelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is technically greater than, to match. - if prospective.local is not None: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # greater than the spec version *and* it's not a pre-release of the - # same version in the spec. - return True - - def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: - return str(prospective).lower() == str(spec).lower() - - @property - def prereleases(self) -> bool: - - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases - - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. - if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. 
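_compare_compatible above rewrites ~=X.Y as the pair >=X.Y, ==X.*; a quick check, assuming the standalone packaging distribution:

from packaging.specifiers import Specifier

compatible = Specifier("~=2.2")
print(compatible.contains("2.3"))  # True: satisfies >=2.2 and the ==2.* prefix match
print(compatible.contains("3.0"))  # False: fails the ==2.* prefix match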
- if parse(version).is_prerelease: - return True - - return False - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - -_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") - - -def _version_split(version: str) -> List[str]: - result: List[str] = [] - for item in version.split("."): - match = _prefix_regex.search(item) - if match: - result.extend(match.groups()) - else: - result.append(item) - return result - - -def _is_not_suffix(segment: str) -> bool: - return not any( - segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") - ) - - -def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: - left_split, right_split = [], [] - - # Get the release segment of our versions - left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) - right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) - - # Get the rest of our versions - left_split.append(left[len(left_split[0]) :]) - right_split.append(right[len(right_split[0]) :]) - - # Insert our padding - left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) - right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) - - return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) - - -class SpecifierSet(BaseSpecifier): - def __init__( - self, specifiers: str = "", prereleases: Optional[bool] = None - ) -> None: - - # Split on , to break each individual specifier into it's own item, and - # strip each item to remove leading/trailing whitespace. - split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - - # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed: Set[_IndividualSpecifier] = set() - for specifier in split_specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) - - # Store our prereleases value so we can use it later to determine if - # we accept prereleases or not. - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return "".format(str(self), pre) - - def __str__(self) -> str: - return ",".join(sorted(str(s) for s in self._specs)) - - def __hash__(self) -> int: - return hash(self._specs) - - def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": - if isinstance(other, str): - other = SpecifierSet(other) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - specifier = SpecifierSet() - specifier._specs = frozenset(self._specs | other._specs) - - if self._prereleases is None and other._prereleases is not None: - specifier._prereleases = other._prereleases - elif self._prereleases is not None and other._prereleases is None: - specifier._prereleases = self._prereleases - elif self._prereleases == other._prereleases: - specifier._prereleases = self._prereleases - else: - raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." 
- ) - - return specifier - - def __eq__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __ne__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - - def __len__(self) -> int: - return len(self._specs) - - def __iter__(self) -> Iterator[_IndividualSpecifier]: - return iter(self._specs) - - @property - def prereleases(self) -> Optional[bool]: - - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: UnparsedVersion) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them. If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all(s.contains(item, prereleases=prereleases) for s in self._specs) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. 
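SpecifierSet combination and containment, sketched with the standalone packaging distribution:

from packaging.specifiers import SpecifierSet

specs = SpecifierSet(">=1.0") & SpecifierSet("<2.0")
print(str(specs))      # <2.0,>=1.0  (members are sorted for __str__)
print("1.5" in specs)  # True: every specifier in the set must match
print("2.1" in specs)  # False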
- if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. - else: - filtered: List[VersionTypeVar] = [] - found_prereleases: List[VersionTypeVar] = [] - - item: UnparsedVersion - parsed_version: Union[Version, LegacyVersion] - - for item in iterable: - # Ensure that we some kind of Version class for this item. - if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue - - # Store any item which is a pre-release for later unless we've - # already found a final version or we are accepting prereleases - if parsed_version.is_prerelease and not prereleases: - if not filtered: - found_prereleases.append(item) - else: - filtered.append(item) - - # If we've found no items except for pre-releases, then we'll go - # ahead and use the pre-releases - if not filtered and found_prereleases and prereleases is None: - return found_prereleases - - return filtered diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/tags.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/tags.py deleted file mode 100755 index e65890a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/tags.py +++ /dev/null @@ -1,484 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import logging -import platform -import sys -import sysconfig -from importlib.machinery import EXTENSION_SUFFIXES -from typing import ( - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, - cast, -) - -from . import _manylinux, _musllinux - -logger = logging.getLogger(__name__) - -PythonVersion = Sequence[int] -MacVersion = Tuple[int, int] - -INTERPRETER_SHORT_NAMES: Dict[str, str] = { - "python": "py", # Generic. - "cpython": "cp", - "pypy": "pp", - "ironpython": "ip", - "jython": "jy", -} - - -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 - - -class Tag: - """ - A representation of the tag triple for a wheel. - - Instances are considered immutable and thus are hashable. Equality checking - is also supported. - """ - - __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] - - def __init__(self, interpreter: str, abi: str, platform: str) -> None: - self._interpreter = interpreter.lower() - self._abi = abi.lower() - self._platform = platform.lower() - # The __hash__ of every single element in a Set[Tag] will be evaluated each time - # that a set calls its `.disjoint()` method, which may be called hundreds of - # times when scanning a page of links for packages with tags matching that - # Set[Tag]. Pre-computing the value here produces significant speedups for - # downstream consumers. 
- self._hash = hash((self._interpreter, self._abi, self._platform)) - - @property - def interpreter(self) -> str: - return self._interpreter - - @property - def abi(self) -> str: - return self._abi - - @property - def platform(self) -> str: - return self._platform - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Tag): - return NotImplemented - - return ( - (self._hash == other._hash) # Short-circuit ASAP for perf reasons. - and (self._platform == other._platform) - and (self._abi == other._abi) - and (self._interpreter == other._interpreter) - ) - - def __hash__(self) -> int: - return self._hash - - def __str__(self) -> str: - return f"{self._interpreter}-{self._abi}-{self._platform}" - - def __repr__(self) -> str: - return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) - - -def parse_tag(tag: str) -> FrozenSet[Tag]: - """ - Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. - - Returning a set is required due to the possibility that the tag is a - compressed tag set. - """ - tags = set() - interpreters, abis, platforms = tag.split("-") - for interpreter in interpreters.split("."): - for abi in abis.split("."): - for platform_ in platforms.split("."): - tags.add(Tag(interpreter, abi, platform_)) - return frozenset(tags) - - -def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: - value = sysconfig.get_config_var(name) - if value is None and warn: - logger.debug( - "Config variable '%s' is unset, Python ABI tag may be incorrect", name - ) - return value - - -def _normalize_string(string: str) -> str: - return string.replace(".", "_").replace("-", "_") - - -def _abi3_applies(python_version: PythonVersion) -> bool: - """ - Determine if the Python version supports abi3. - - PEP 384 was first implemented in Python 3.2. - """ - return len(python_version) > 1 and tuple(python_version) >= (3, 2) - - -def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: - py_version = tuple(py_version) # To allow for version comparison. - abis = [] - version = _version_nodot(py_version[:2]) - debug = pymalloc = ucs4 = "" - with_debug = _get_config_var("Py_DEBUG", warn) - has_refcount = hasattr(sys, "gettotalrefcount") - # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled - # extension modules is the best option. - # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 - has_ext = "_d.pyd" in EXTENSION_SUFFIXES - if with_debug or (with_debug is None and (has_refcount or has_ext)): - debug = "d" - if py_version < (3, 8): - with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) - if with_pymalloc or with_pymalloc is None: - pymalloc = "m" - if py_version < (3, 3): - unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) - if unicode_size == 4 or ( - unicode_size is None and sys.maxunicode == 0x10FFFF - ): - ucs4 = "u" - elif debug: - # Debug builds can also load "normal" extension modules. - # We can also assume no UCS-4 or pymalloc requirement. - abis.append(f"cp{version}") - abis.insert( - 0, - "cp{version}{debug}{pymalloc}{ucs4}".format( - version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 - ), - ) - return abis - - -def cpython_tags( - python_version: Optional[PythonVersion] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a CPython interpreter. 
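A sketch of parse_tag() above (standalone packaging distribution): a compressed tag set expands into one Tag per interpreter/ABI/platform combination:

from packaging.tags import parse_tag

tags = parse_tag("cp38.cp39-abi3-manylinux1_x86_64")
print(sorted(str(t) for t in tags))
# ['cp38-abi3-manylinux1_x86_64', 'cp39-abi3-manylinux1_x86_64']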
- - The tags consist of: - - cp-- - - cp-abi3- - - cp-none- - - cp-abi3- # Older Python versions down to 3.2. - - If python_version only specifies a major version then user-provided ABIs and - the 'none' ABItag will be used. - - If 'abi3' or 'none' are specified in 'abis' then they will be yielded at - their normal position and not at the beginning. - """ - if not python_version: - python_version = sys.version_info[:2] - - interpreter = "cp{}".format(_version_nodot(python_version[:2])) - - if abis is None: - if len(python_version) > 1: - abis = _cpython_abis(python_version, warn) - else: - abis = [] - abis = list(abis) - # 'abi3' and 'none' are explicitly handled later. - for explicit_abi in ("abi3", "none"): - try: - abis.remove(explicit_abi) - except ValueError: - pass - - platforms = list(platforms or platform_tags()) - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - if _abi3_applies(python_version): - yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) - yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) - - if _abi3_applies(python_version): - for minor_version in range(python_version[1] - 1, 1, -1): - for platform_ in platforms: - interpreter = "cp{version}".format( - version=_version_nodot((python_version[0], minor_version)) - ) - yield Tag(interpreter, "abi3", platform_) - - -def _generic_abi() -> Iterator[str]: - abi = sysconfig.get_config_var("SOABI") - if abi: - yield _normalize_string(abi) - - -def generic_tags( - interpreter: Optional[str] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a generic interpreter. - - The tags consist of: - - -- - - The "none" ABI will be added if it was not explicitly provided. - """ - if not interpreter: - interp_name = interpreter_name() - interp_version = interpreter_version(warn=warn) - interpreter = "".join([interp_name, interp_version]) - if abis is None: - abis = _generic_abi() - platforms = list(platforms or platform_tags()) - abis = list(abis) - if "none" not in abis: - abis.append("none") - for abi in abis: - for platform_ in platforms: - yield Tag(interpreter, abi, platform_) - - -def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: - """ - Yields Python versions in descending order. - - After the latest version, the major-only version will be yielded, and then - all previous versions of that major version. - """ - if len(py_version) > 1: - yield "py{version}".format(version=_version_nodot(py_version[:2])) - yield "py{major}".format(major=py_version[0]) - if len(py_version) > 1: - for minor in range(py_version[1] - 1, -1, -1): - yield "py{version}".format(version=_version_nodot((py_version[0], minor))) - - -def compatible_tags( - python_version: Optional[PythonVersion] = None, - interpreter: Optional[str] = None, - platforms: Optional[Iterable[str]] = None, -) -> Iterator[Tag]: - """ - Yields the sequence of tags that are compatible with a specific version of Python. - - The tags consist of: - - py*-none- - - -none-any # ... if `interpreter` is provided. 
- - py*-none-any - """ - if not python_version: - python_version = sys.version_info[:2] - platforms = list(platforms or platform_tags()) - for version in _py_interpreter_range(python_version): - for platform_ in platforms: - yield Tag(version, "none", platform_) - if interpreter: - yield Tag(interpreter, "none", "any") - for version in _py_interpreter_range(python_version): - yield Tag(version, "none", "any") - - -def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: - if not is_32bit: - return arch - - if arch.startswith("ppc"): - return "ppc" - - return "i386" - - -def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: - formats = [cpu_arch] - if cpu_arch == "x86_64": - if version < (10, 4): - return [] - formats.extend(["intel", "fat64", "fat32"]) - - elif cpu_arch == "i386": - if version < (10, 4): - return [] - formats.extend(["intel", "fat32", "fat"]) - - elif cpu_arch == "ppc64": - # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? - if version > (10, 5) or version < (10, 4): - return [] - formats.append("fat64") - - elif cpu_arch == "ppc": - if version > (10, 6): - return [] - formats.extend(["fat32", "fat"]) - - if cpu_arch in {"arm64", "x86_64"}: - formats.append("universal2") - - if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: - formats.append("universal") - - return formats - - -def mac_platforms( - version: Optional[MacVersion] = None, arch: Optional[str] = None -) -> Iterator[str]: - """ - Yields the platform tags for a macOS system. - - The `version` parameter is a two-item tuple specifying the macOS version to - generate platform tags for. The `arch` parameter is the CPU architecture to - generate platform tags for. Both parameters default to the appropriate value - for the current system. - """ - version_str, _, cpu_arch = platform.mac_ver() - if version is None: - version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) - else: - version = version - if arch is None: - arch = _mac_arch(cpu_arch) - else: - arch = arch - - if (10, 0) <= version and version < (11, 0): - # Prior to Mac OS 11, each yearly release of Mac OS bumped the - # "minor" version number. The major version was always 10. - for minor_version in range(version[1], -1, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=10, minor=minor_version, binary_format=binary_format - ) - - if version >= (11, 0): - # Starting with Mac OS 11, each yearly release bumps the major version - # number. The minor versions are now the midyear updates. - for major_version in range(version[0], 10, -1): - compat_version = major_version, 0 - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=major_version, minor=0, binary_format=binary_format - ) - - if version >= (11, 0): - # Mac OS 11 on x86_64 is compatible with binaries from previous releases. - # Arm64 support was introduced in 11.0, so no Arm binaries from previous - # releases exist. - # - # However, the "universal2" binary format can have a - # macOS version earlier than 11.0 when the x86_64 part of the binary supports - # that version of macOS. 
- if arch == "x86_64": - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - else: - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_format = "universal2" - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - - -def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: - linux = _normalize_string(sysconfig.get_platform()) - if is_32bit: - if linux == "linux_x86_64": - linux = "linux_i686" - elif linux == "linux_aarch64": - linux = "linux_armv7l" - _, arch = linux.split("_", 1) - yield from _manylinux.platform_tags(linux, arch) - yield from _musllinux.platform_tags(arch) - yield linux - - -def _generic_platforms() -> Iterator[str]: - yield _normalize_string(sysconfig.get_platform()) - - -def platform_tags() -> Iterator[str]: - """ - Provides the platform tags for this installation. - """ - if platform.system() == "Darwin": - return mac_platforms() - elif platform.system() == "Linux": - return _linux_platforms() - else: - return _generic_platforms() - - -def interpreter_name() -> str: - """ - Returns the name of the running interpreter. - """ - name = sys.implementation.name - return INTERPRETER_SHORT_NAMES.get(name) or name - - -def interpreter_version(*, warn: bool = False) -> str: - """ - Returns the version of the running interpreter. - """ - version = _get_config_var("py_version_nodot", warn=warn) - if version: - version = str(version) - else: - version = _version_nodot(sys.version_info[:2]) - return version - - -def _version_nodot(version: PythonVersion) -> str: - return "".join(map(str, version)) - - -def sys_tags(*, warn: bool = False) -> Iterator[Tag]: - """ - Returns the sequence of tag triples for the running interpreter. - - The order of the sequence corresponds to priority order for the - interpreter, from most to least important. - """ - - interp_name = interpreter_name() - if interp_name == "cp": - yield from cpython_tags(warn=warn) - else: - yield from generic_tags() - - yield from compatible_tags() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/utils.py deleted file mode 100755 index bab11b8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/utils.py +++ /dev/null @@ -1,136 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -from typing import FrozenSet, NewType, Tuple, Union, cast - -from .tags import Tag, parse_tag -from .version import InvalidVersion, Version - -BuildTag = Union[Tuple[()], Tuple[int, str]] -NormalizedName = NewType("NormalizedName", str) - - -class InvalidWheelFilename(ValueError): - """ - An invalid wheel filename was found, users should refer to PEP 427. - """ - - -class InvalidSdistFilename(ValueError): - """ - An invalid sdist filename was found, users should refer to the packaging user guide. 
- """ - - -_canonicalize_regex = re.compile(r"[-_.]+") -# PEP 427: The build number must start with a digit. -_build_tag_regex = re.compile(r"(\d+)(.*)") - - -def canonicalize_name(name: str) -> NormalizedName: - # This is taken from PEP 503. - value = _canonicalize_regex.sub("-", name).lower() - return cast(NormalizedName, value) - - -def canonicalize_version(version: Union[Version, str]) -> str: - """ - This is very similar to Version.__str__, but has one subtle difference - with the way it handles the release segment. - """ - if isinstance(version, str): - try: - parsed = Version(version) - except InvalidVersion: - # Legacy versions cannot be normalized - return version - else: - parsed = version - - parts = [] - - # Epoch - if parsed.epoch != 0: - parts.append(f"{parsed.epoch}!") - - # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) - - # Pre-release - if parsed.pre is not None: - parts.append("".join(str(x) for x in parsed.pre)) - - # Post-release - if parsed.post is not None: - parts.append(f".post{parsed.post}") - - # Development release - if parsed.dev is not None: - parts.append(f".dev{parsed.dev}") - - # Local version segment - if parsed.local is not None: - parts.append(f"+{parsed.local}") - - return "".join(parts) - - -def parse_wheel_filename( - filename: str, -) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: - if not filename.endswith(".whl"): - raise InvalidWheelFilename( - f"Invalid wheel filename (extension must be '.whl'): {filename}" - ) - - filename = filename[:-4] - dashes = filename.count("-") - if dashes not in (4, 5): - raise InvalidWheelFilename( - f"Invalid wheel filename (wrong number of parts): {filename}" - ) - - parts = filename.split("-", dashes - 2) - name_part = parts[0] - # See PEP 427 for the rules on escaping the project name - if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename(f"Invalid project name: {filename}") - name = canonicalize_name(name_part) - version = Version(parts[1]) - if dashes == 5: - build_part = parts[2] - build_match = _build_tag_regex.match(build_part) - if build_match is None: - raise InvalidWheelFilename( - f"Invalid build number: {build_part} in '{filename}'" - ) - build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) - else: - build = () - tags = parse_tag(parts[-1]) - return (name, version, build, tags) - - -def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: - if filename.endswith(".tar.gz"): - file_stem = filename[: -len(".tar.gz")] - elif filename.endswith(".zip"): - file_stem = filename[: -len(".zip")] - else: - raise InvalidSdistFilename( - f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" - f" {filename}" - ) - - # We are requiring a PEP 440 version, which cannot contain dashes, - # so we split on the last dash. 
- name_part, sep, version_part = file_stem.rpartition("-") - if not sep: - raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") - - name = canonicalize_name(name_part) - version = Version(version_part) - return (name, version) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/version.py deleted file mode 100755 index de9a09a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/packaging/version.py +++ /dev/null @@ -1,504 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import collections -import itertools -import re -import warnings -from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union - -from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType - -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] - -InfiniteTypes = Union[InfinityType, NegativeInfinityType] -PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] -SubLocalType = Union[InfiniteTypes, int, str] -LocalType = Union[ - NegativeInfinityType, - Tuple[ - Union[ - SubLocalType, - Tuple[SubLocalType, str], - Tuple[NegativeInfinityType, SubLocalType], - ], - ..., - ], -] -CmpKey = Tuple[ - int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType -] -LegacyCmpKey = Tuple[int, Tuple[str, ...]] -VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool -] - -_Version = collections.namedtuple( - "_Version", ["epoch", "release", "dev", "pre", "post", "local"] -) - - -def parse(version: str) -> Union["LegacyVersion", "Version"]: - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. - """ - - -class _BaseVersion: - _key: Union[CmpKey, LegacyCmpKey] - - def __hash__(self) -> int: - return hash(self._key) - - # Please keep the duplicated `isinstance` check - # in the six comparisons hereunder - # unless you find a way to avoid adding overhead function calls. 
- def __lt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key < other._key - - def __le__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key <= other._key - - def __eq__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key == other._key - - def __ge__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key >= other._key - - def __gt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key > other._key - - def __ne__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key != other._key - - -class LegacyVersion(_BaseVersion): - def __init__(self, version: str) -> None: - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def __str__(self) -> str: - return self._version - - def __repr__(self) -> str: - return f"" - - @property - def public(self) -> str: - return self._version - - @property - def base_version(self) -> str: - return self._version - - @property - def epoch(self) -> int: - return -1 - - @property - def release(self) -> None: - return None - - @property - def pre(self) -> None: - return None - - @property - def post(self) -> None: - return None - - @property - def dev(self) -> None: - return None - - @property - def local(self) -> None: - return None - - @property - def is_prerelease(self) -> bool: - return False - - @property - def is_postrelease(self) -> bool: - return False - - @property - def is_devrelease(self) -> bool: - return False - - -_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) - -_legacy_version_replacement_map = { - "pre": "c", - "preview": "c", - "-": "final-", - "rc": "c", - "dev": "@", -} - - -def _parse_version_parts(s: str) -> Iterator[str]: - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version: str) -> LegacyCmpKey: - - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts: List[str] = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - - return epoch, tuple(parts) - - -# Deliberately not anchored to the start and end of the string, to make it -# easier for 3rd party code to reuse -VERSION_PATTERN = r""" - v? 
-    (?:
-        (?:(?P<epoch>[0-9]+)!)?                           # epoch
-        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
-        (?P<pre>                                          # pre-release
-            [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\.]?
-            (?P<pre_n>[0-9]+)?
-        )?
-        (?P<post>                                         # post release
-            (?:-(?P<post_n1>[0-9]+))
-            |
-            (?:
-                [-_\.]?
-                (?P<post_l>post|rev|r)
-                [-_\.]?
-                (?P<post_n2>[0-9]+)?
-            )
-        )?
-        (?P<dev>                                          # dev release
-            [-_\.]?
-            (?P<dev_l>dev)
-            [-_\.]?
-            (?P<dev_n>[0-9]+)?
-        )?
-    )
-    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
-"""
-
-
-class Version(_BaseVersion):
-
-    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    def __init__(self, version: str) -> None:
-
-        # Validate the version and parse it into pieces
-        match = self._regex.search(version)
-        if not match:
-            raise InvalidVersion(f"Invalid version: '{version}'")
-
-        # Store the parsed out pieces of the version
-        self._version = _Version(
-            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
-            release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
-            post=_parse_letter_version(
-                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
-            ),
-            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
-            local=_parse_local_version(match.group("local")),
-        )
-
-        # Generate a key which will be used for sorting
-        self._key = _cmpkey(
-            self._version.epoch,
-            self._version.release,
-            self._version.pre,
-            self._version.post,
-            self._version.dev,
-            self._version.local,
-        )
-
-    def __repr__(self) -> str:
-        return f""
-
-    def __str__(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        # Pre-release
-        if self.pre is not None:
-            parts.append("".join(str(x) for x in self.pre))
-
-        # Post-release
-        if self.post is not None:
-            parts.append(f".post{self.post}")
-
-        # Development release
-        if self.dev is not None:
-            parts.append(f".dev{self.dev}")
-
-        # Local version segment
-        if self.local is not None:
-            parts.append(f"+{self.local}")
-
-        return "".join(parts)
-
-    @property
-    def epoch(self) -> int:
-        _epoch: int = self._version.epoch
-        return _epoch
-
-    @property
-    def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
-        return _release
-
-    @property
-    def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
-
-    @property
-    def post(self) -> Optional[int]:
-        return self._version.post[1] if self._version.post else None
-
-    @property
-    def dev(self) -> Optional[int]:
-        return self._version.dev[1] if self._version.dev else None
-
-    @property
-    def local(self) -> Optional[str]:
-        if self._version.local:
-            return ".".join(str(x) for x in self._version.local)
-        else:
-            return None
-
-    @property
-    def public(self) -> str:
-        return str(self).split("+", 1)[0]
-
-    @property
-    def base_version(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        return "".join(parts)
-
-    @property
-    def is_prerelease(self) -> bool:
-        return self.dev is not None or self.pre is not None
-
-    @property
-    def is_postrelease(self) -> bool:
-        return self.post is not None
-
-    @property
-    def is_devrelease(self) -> bool:
-        return self.dev is not None
-
-    @property
-    def major(self) -> int:
-        return self.release[0] if len(self.release) >= 1 else 0
-
-    @property
-    def minor(self) -> int:
-        return self.release[1] if len(self.release) >= 2 else 0
-
-    @property
-    def micro(self) -> int:
-        return self.release[2] if len(self.release) >= 3 else 0
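The properties above decompose a parsed version; a quick illustration with the standalone packaging distribution:

from packaging.version import Version

v = Version("1!2.3.4rc1.post2.dev3+ubuntu.1")
print(v.epoch, v.release, v.pre, v.post, v.dev, v.local)
# 1 (2, 3, 4) ('rc', 1) 2 3 ubuntu.1
print(v.public, "|", v.base_version)
# 1!2.3.4rc1.post2.dev3 | 1!2.3.4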
-
-
-def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
-    if letter:
-        # We consider there to be an implicit 0 in a pre-release if there is
-        # not a numeral associated with it.
-        if number is None:
-            number = 0
-
-        # We normalize any letters to their lower case form
-        letter = letter.lower()
-
-        # We consider some words to be alternate spellings of other words and
-        # in those cases we want to normalize the spellings to our preferred
-        # spelling.
-        if letter == "alpha":
-            letter = "a"
-        elif letter == "beta":
-            letter = "b"
-        elif letter in ["c", "pre", "preview"]:
-            letter = "rc"
-        elif letter in ["rev", "r"]:
-            letter = "post"
-
-        return letter, int(number)
-    if not letter and number:
-        # We assume if we are given a number, but we are not given a letter
-        # then this is using the implicit post release syntax (e.g. 1.0-1)
-        letter = "post"
-
-        return letter, int(number)
-
-    return None
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
-    """
-    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
-    """
-    if local is not None:
-        return tuple(
-            part.lower() if not part.isdigit() else int(part)
-            for part in _local_version_separators.split(local)
-        )
-    return None
-
-
-def _cmpkey(
-    epoch: int,
-    release: Tuple[int, ...],
-    pre: Optional[Tuple[str, int]],
-    post: Optional[Tuple[str, int]],
-    dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll reverse the list, drop all the now-
-    # leading zeros until we come to something non-zero, then take the rest,
-    # re-reverse it back into the correct order, and make it a tuple to use
-    # for our sorting key.
-    _release = tuple(
-        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
-    )
-
-    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
-    # We'll do this by abusing the pre segment, but we _only_ want to do this
-    # if there is not a pre or a post segment. If we have one of those then
-    # the normal sorting rules will handle this case correctly.
-    if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
-    # Versions without a pre-release (except as noted above) should sort after
-    # those with one.
-    elif pre is None:
-        _pre = Infinity
-    else:
-        _pre = pre
-
-    # Versions without a post segment should sort before those with one.
-    if post is None:
-        _post: PrePostDevType = NegativeInfinity
-
-    else:
-        _post = post
-
-    # Versions without a development segment should sort after those with one.
-    if dev is None:
-        _dev: PrePostDevType = Infinity
-
-    else:
-        _dev = dev
-
-    if local is None:
-        # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
-    else:
-        # Versions with a local segment need that segment parsed to implement
-        # the sorting rules in PEP440.
-        # - Alpha numeric segments sort before numeric segments
-        # - Alpha numeric segments sort lexicographically
-        # - Numeric segments sort numerically
-        # - Shorter versions sort before longer versions when the prefixes
-        #   match exactly
-        _local = tuple(
-            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
-        )
-
-    return epoch, _release, _pre, _post, _dev, _local
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/pyparsing.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/pyparsing.py
deleted file mode 100755
index cf75e1e..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/pyparsing.py
+++ /dev/null
@@ -1,5742 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2018  Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = \
-"""
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and executing simple grammars,
-vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
-don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
-provides a library of classes that you use to construct the grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form 
-C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements 
-(L{'+'<And>} operator gives L{And} expressions, strings are auto-converted to
-L{Literal} expressions)::
-
-    from pyparsing import Word, alphas
-
-    # define grammar of a greeting
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-
-    hello = "Hello, World!"
-    print (hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the self-explanatory
-class names, and the use of '+', '|' and '^' operators.
-
-The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
-object with named attributes.
-
-The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
- - quoted strings
- - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
- - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
- - construct character word-group expressions using the L{Word} class
- - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
- - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
- - associate names with your parsed results using L{ParserElement.setResultsName}
- - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
- - find more useful common expressions in the L{pyparsing_common} namespace class
-"""
-
-__version__ = "2.2.1"
-__versionTime__ = "18 Sep 2018 00:49 UTC"
-__author__ = "Paul McGuire "
-
-import string
-from weakref import ref as wkref
-import copy
-import sys
-import warnings
-import re
-import sre_constants
-import collections
-import pprint
-import traceback
-import types
-from datetime import datetime
-
-try:
-    from _thread import RLock
-except ImportError:
-    from threading import RLock
-
-try:
-    # Python 3
-    from collections.abc import Iterable
-    from collections.abc import MutableMapping
-except ImportError:
-    # Python 2.7
-    from collections import Iterable
-    from collections import MutableMapping
-
-try:
-    from collections import OrderedDict as _OrderedDict
-except ImportError:
-    try:
-        from ordereddict import OrderedDict as _OrderedDict
-    except ImportError:
-        _OrderedDict = None
-
-#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
-
-__all__ = [
-'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
-'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
-'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
-'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
-'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
-'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
-'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
-'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
-'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
-'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
-'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
-'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
-'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
-'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
-'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
-'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
-'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
-'CloseMatch', 'tokenMap', 'pyparsing_common',
-]
-
-system_version = tuple(sys.version_info)[:3]
-PY_3 = system_version[0] == 3
-if PY_3:
-    _MAX_INT = sys.maxsize
-    basestring = str
-    unichr = chr
-    _ustr = str
-
-    # build list of single arg builtins, that can be used as parse actions
-    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
-
-else:
-    _MAX_INT = sys.maxint
-    range = xrange
-
-    def _ustr(obj):
-        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
-           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
-           then < returns the unicode object | encodes it with the default encoding | ... >.
-        """
-        if isinstance(obj,unicode):
-            return obj
-
-        try:
-            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
-            # it won't break any existing code.
-            return str(obj)
-
-        except UnicodeEncodeError:
-            # Else encode it
-            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
-            xmlcharref = Regex(r'&#\d+;')
-            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
-            return xmlcharref.transformString(ret)
-
-    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
-    singleArgBuiltins = []
-    import __builtin__
-    for fname in "sum len sorted reversed list tuple set any all min max".split():
-        try:
-            singleArgBuiltins.append(getattr(__builtin__,fname))
-        except AttributeError:
-            continue
-            
-_generatorType = type((y for y in range(1)))
- 
-def _xml_escape(data):
-    """Escape &, <, >, ", ', etc. in a string of data."""
-
-    # ampersand must be replaced first
-    from_symbols = '&><"\''
-    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
-    for from_,to_ in zip(from_symbols, to_symbols):
-        data = data.replace(from_, to_)
-    return data
-
-class _Constants(object):
-    pass
-
-alphas     = string.ascii_uppercase + string.ascii_lowercase
-nums       = "0123456789"
-hexnums    = nums + "ABCDEFabcdef"
-alphanums  = alphas + nums
-_bslash    = chr(92)
-printables = "".join(c for c in string.printable if c not in string.whitespace)
-
-class ParseBaseException(Exception):
-    """base exception class for all parsing runtime exceptions"""
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__( self, pstr, loc=0, msg=None, elem=None ):
-        self.loc = loc
-        if msg is None:
-            self.msg = pstr
-            self.pstr = ""
-        else:
-            self.msg = msg
-            self.pstr = pstr
-        self.parserElement = elem
-        self.args = (pstr, loc, msg)
-
-    @classmethod
-    def _from_exception(cls, pe):
-        """
-        internal factory method to simplify creating one type of ParseException 
-        from another - avoids having __init__ signature conflicts among subclasses
-        """
-        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
-    def __getattr__( self, aname ):
-        """supported attributes by name are:
-            - lineno - returns the line number of the exception text
-            - col - returns the column number of the exception text
-            - line - returns the line containing the exception text
-        """
-        if( aname == "lineno" ):
-            return lineno( self.loc, self.pstr )
-        elif( aname in ("col", "column") ):
-            return col( self.loc, self.pstr )
-        elif( aname == "line" ):
-            return line( self.loc, self.pstr )
-        else:
-            raise AttributeError(aname)
-
-    def __str__( self ):
-        return "%s (at char %d), (line:%d, col:%d)" % \
-                ( self.msg, self.loc, self.lineno, self.column )
-    def __repr__( self ):
-        return _ustr(self)
-    def markInputline( self, markerString = ">!<" ):
-        """Extracts the exception line from the input string, and marks
-           the location of the exception with a special symbol.
-        """
-        line_str = self.line
-        line_column = self.column - 1
-        if markerString:
-            line_str = "".join((line_str[:line_column],
-                                markerString, line_str[line_column:]))
-        return line_str.strip()
-    def __dir__(self):
-        return "lineno col line".split() + dir(type(self))
-
-class ParseException(ParseBaseException):
-    """
-    Exception thrown when parse expressions don't match class;
-    supported attributes by name are:
-     - lineno - returns the line number of the exception text
-     - col - returns the column number of the exception text
-     - line - returns the line containing the exception text
-        
-    Example::
-        try:
-            Word(nums).setName("integer").parseString("ABC")
-        except ParseException as pe:
-            print(pe)
-            print("column: {}".format(pe.col))
-            
-    prints::
-       Expected integer (at char 0), (line:1, col:1)
-        column: 1
-    """
-    pass
-
-class ParseFatalException(ParseBaseException):
-    """user-throwable exception thrown when inconsistent parse content
-       is found; stops all parsing immediately"""
-    pass
-
-class ParseSyntaxException(ParseFatalException):
-    """just like L{ParseFatalException}, but thrown internally when an
-       L{ErrorStop} ('-' operator) indicates that parsing is to stop 
-       immediately because an unbacktrackable syntax error has been found"""
-    pass
-
-#~ class ReparseException(ParseBaseException):
-    #~ """Experimental class - parse actions can raise this exception to cause
-       #~ pyparsing to reparse the input string:
-        #~ - with a modified input string, and/or
-        #~ - with a modified start location
-       #~ Set the values of the ReparseException in the constructor, and raise the
-       #~ exception in a parse action to cause pyparsing to use the new string/location.
-       #~ Setting the values as None causes no change to be made.
-       #~ """
-    #~ def __init_( self, newstring, restartLoc ):
-        #~ self.newParseText = newstring
-        #~ self.reparseLoc = restartLoc
-
-class RecursiveGrammarException(Exception):
-    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
-    def __init__( self, parseElementList ):
-        self.parseElementTrace = parseElementList
-
-    def __str__( self ):
-        return "RecursiveGrammarException: %s" % self.parseElementTrace
-
-class _ParseResultsWithOffset(object):
-    def __init__(self,p1,p2):
-        self.tup = (p1,p2)
-    def __getitem__(self,i):
-        return self.tup[i]
-    def __repr__(self):
-        return repr(self.tup[0])
-    def setOffset(self,i):
-        self.tup = (self.tup[0],i)
-
-class ParseResults(object):
-    """
-    Structured parse results, to provide multiple means of access to the parsed data:
-       - as a list (C{len(results)})
-       - by list index (C{results[0], results[1]}, etc.)
-       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
-
-    Example::
-        integer = Word(nums)
-        date_str = (integer.setResultsName("year") + '/' 
-                        + integer.setResultsName("month") + '/' 
-                        + integer.setResultsName("day"))
-        # equivalent form:
-        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-        # parseString returns a ParseResults object
-        result = date_str.parseString("1999/12/31")
-
-        def test(s, fn=repr):
-            print("%s -> %s" % (s, fn(eval(s))))
-        test("list(result)")
-        test("result[0]")
-        test("result['month']")
-        test("result.day")
-        test("'month' in result")
-        test("'minutes' in result")
-        test("result.dump()", str)
-    prints::
-        list(result) -> ['1999', '/', '12', '/', '31']
-        result[0] -> '1999'
-        result['month'] -> '12'
-        result.day -> '31'
-        'month' in result -> True
-        'minutes' in result -> False
-        result.dump() -> ['1999', '/', '12', '/', '31']
-        - day: 31
-        - month: 12
-        - year: 1999
-    """
-    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
-        if isinstance(toklist, cls):
-            return toklist
-        retobj = object.__new__(cls)
-        retobj.__doinit = True
-        return retobj
-
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
-        if self.__doinit:
-            self.__doinit = False
-            self.__name = None
-            self.__parent = None
-            self.__accumNames = {}
-            self.__asList = asList
-            self.__modal = modal
-            if toklist is None:
-                toklist = []
-            if isinstance(toklist, list):
-                self.__toklist = toklist[:]
-            elif isinstance(toklist, _generatorType):
-                self.__toklist = list(toklist)
-            else:
-                self.__toklist = [toklist]
-            self.__tokdict = dict()
-
-        if name is not None and name:
-            if not modal:
-                self.__accumNames[name] = 0
-            if isinstance(name,int):
-                name = _ustr(name) # will always return a str, but use _ustr for consistency
-            self.__name = name
-            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
-                if isinstance(toklist,basestring):
-                    toklist = [ toklist ]
-                if asList:
-                    if isinstance(toklist,ParseResults):
-                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
-                    else:
-                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
-                    self[name].__name = name
-                else:
-                    try:
-                        self[name] = toklist[0]
-                    except (KeyError,TypeError,IndexError):
-                        self[name] = toklist
-
-    def __getitem__( self, i ):
-        if isinstance( i, (int,slice) ):
-            return self.__toklist[i]
-        else:
-            if i not in self.__accumNames:
-                return self.__tokdict[i][-1][0]
-            else:
-                return ParseResults([ v[0] for v in self.__tokdict[i] ])
-
-    def __setitem__( self, k, v, isinstance=isinstance ):
-        if isinstance(v,_ParseResultsWithOffset):
-            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
-            sub = v[0]
-        elif isinstance(k,(int,slice)):
-            self.__toklist[k] = v
-            sub = v
-        else:
-            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
-            sub = v
-        if isinstance(sub,ParseResults):
-            sub.__parent = wkref(self)
-
-    def __delitem__( self, i ):
-        if isinstance(i,(int,slice)):
-            mylen = len( self.__toklist )
-            del self.__toklist[i]
-
-            # convert int to slice
-            if isinstance(i, int):
-                if i < 0:
-                    i += mylen
-                i = slice(i, i+1)
-            # get removed indices
-            removed = list(range(*i.indices(mylen)))
-            removed.reverse()
-            # fixup indices in token dictionary
-            for name,occurrences in self.__tokdict.items():
-                for j in removed:
-                    for k, (value, position) in enumerate(occurrences):
-                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
-        else:
-            del self.__tokdict[i]
-
-    def __contains__( self, k ):
-        return k in self.__tokdict
-
-    def __len__( self ): return len( self.__toklist )
-    def __bool__(self): return ( not not self.__toklist )
-    __nonzero__ = __bool__
-    def __iter__( self ): return iter( self.__toklist )
-    def __reversed__( self ): return iter( self.__toklist[::-1] )
-    def _iterkeys( self ):
-        if hasattr(self.__tokdict, "iterkeys"):
-            return self.__tokdict.iterkeys()
-        else:
-            return iter(self.__tokdict)
-
-    def _itervalues( self ):
-        return (self[k] for k in self._iterkeys())
-            
-    def _iteritems( self ):
-        return ((k, self[k]) for k in self._iterkeys())
-
-    if PY_3:
-        keys = _iterkeys       
-        """Returns an iterator of all named result keys (Python 3.x only)."""
-
-        values = _itervalues
-        """Returns an iterator of all named result values (Python 3.x only)."""
-
-        items = _iteritems
-        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
-
-    else:
-        iterkeys = _iterkeys
-        """Returns an iterator of all named result keys (Python 2.x only)."""
-
-        itervalues = _itervalues
-        """Returns an iterator of all named result values (Python 2.x only)."""
-
-        iteritems = _iteritems
-        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
-
-        def keys( self ):
-            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iterkeys())
-
-        def values( self ):
-            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.itervalues())
-                
-        def items( self ):
-            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iteritems())
-
-    def haskeys( self ):
-        """Since keys() returns an iterator, this method is helpful in bypassing
-           code that looks for the existence of any defined results names."""
-        return bool(self.__tokdict)
-        
-    def pop( self, *args, **kwargs):
-        """
-        Removes and returns item at specified index (default=C{last}).
-        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
-        argument or an integer argument, it will use C{list} semantics
-        and pop tokens from the list of parsed tokens. If passed a 
-        non-integer argument (most likely a string), it will use C{dict}
-        semantics and pop the corresponding value from any defined 
-        results names. A second default return value argument is 
-        supported, just as in C{dict.pop()}.
-
-        Example::
-            def remove_first(tokens):
-                tokens.pop(0)
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
-
-            label = Word(alphas)
-            patt = label("LABEL") + OneOrMore(Word(nums))
-            print(patt.parseString("AAB 123 321").dump())
-
-            # Use pop() in a parse action to remove named result (note that corresponding value is not
-            # removed from list form of results)
-            def remove_LABEL(tokens):
-                tokens.pop("LABEL")
-                return tokens
-            patt.addParseAction(remove_LABEL)
-            print(patt.parseString("AAB 123 321").dump())
-        prints::
-            ['AAB', '123', '321']
-            - LABEL: AAB
-
-            ['AAB', '123', '321']
-        """
-        if not args:
-            args = [-1]
-        for k,v in kwargs.items():
-            if k == 'default':
-                args = (args[0], v)
-            else:
-                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
-        if (isinstance(args[0], int) or 
-                        len(args) == 1 or 
-                        args[0] in self):
-            index = args[0]
-            ret = self[index]
-            del self[index]
-            return ret
-        else:
-            defaultvalue = args[1]
-            return defaultvalue
-
-    def get(self, key, defaultValue=None):
-        """
-        Returns named result matching the given key, or if there is no
-        such name, then returns the given C{defaultValue} or C{None} if no
-        C{defaultValue} is specified.
-
-        Similar to C{dict.get()}.
-        
-        Example::
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
-
-            result = date_str.parseString("1999/12/31")
-            print(result.get("year")) # -> '1999'
-            print(result.get("hour", "not specified")) # -> 'not specified'
-            print(result.get("hour")) # -> None
-        """
-        if key in self:
-            return self[key]
-        else:
-            return defaultValue
-
-    def insert( self, index, insStr ):
-        """
-        Inserts new element at location index in the list of parsed tokens.
-        
-        Similar to C{list.insert()}.
-
-        Example::
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to insert the parse location in the front of the parsed results
-            def insert_locn(locn, tokens):
-                tokens.insert(0, locn)
-            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
-        """
-        self.__toklist.insert(index, insStr)
-        # fixup indices in token dictionary
-        for name,occurrences in self.__tokdict.items():
-            for k, (value, position) in enumerate(occurrences):
-                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
-
-    def append( self, item ):
-        """
-        Add single element to end of ParseResults list of elements.
-
-        Example::
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-            
-            # use a parse action to compute the sum of the parsed integers, and add it to the end
-            def append_sum(tokens):
-                tokens.append(sum(map(int, tokens)))
-            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
-        """
-        self.__toklist.append(item)
-
-    def extend( self, itemseq ):
-        """
-        Add sequence of elements to end of ParseResults list of elements.
-
-        Example::
-            patt = OneOrMore(Word(alphas))
-            
-            # use a parse action to append the reverse of the matched strings, to make a palindrome
-            def make_palindrome(tokens):
-                tokens.extend(reversed([t[::-1] for t in tokens]))
-                return ''.join(tokens)
-            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
-        """
-        if isinstance(itemseq, ParseResults):
-            self += itemseq
-        else:
-            self.__toklist.extend(itemseq)
-
-    def clear( self ):
-        """
-        Clear all elements and results names.
-        """
-        del self.__toklist[:]
-        self.__tokdict.clear()
-
-    def __getattr__( self, name ):
-        try:
-            return self[name]
-        except KeyError:
-            return ""
-            
-        if name in self.__tokdict:
-            if name not in self.__accumNames:
-                return self.__tokdict[name][-1][0]
-            else:
-                return ParseResults([ v[0] for v in self.__tokdict[name] ])
-        else:
-            return ""
-
-    def __add__( self, other ):
-        ret = self.copy()
-        ret += other
-        return ret
-
-    def __iadd__( self, other ):
-        if other.__tokdict:
-            offset = len(self.__toklist)
-            addoffset = lambda a: offset if a<0 else a+offset
-            otheritems = other.__tokdict.items()
-            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
-                                for (k,vlist) in otheritems for v in vlist]
-            for k,v in otherdictitems:
-                self[k] = v
-                if isinstance(v[0],ParseResults):
-                    v[0].__parent = wkref(self)
-            
-        self.__toklist += other.__toklist
-        self.__accumNames.update( other.__accumNames )
-        return self
-
-    def __radd__(self, other):
-        if isinstance(other,int) and other == 0:
-            # useful for merging many ParseResults using sum() builtin
-            return self.copy()
-        else:
-            # this may raise a TypeError - so be it
-            return other + self
-        
-    def __repr__( self ):
-        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
-
-    def __str__( self ):
-        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
-
-    def _asStringList( self, sep='' ):
-        out = []
-        for item in self.__toklist:
-            if out and sep:
-                out.append(sep)
-            if isinstance( item, ParseResults ):
-                out += item._asStringList()
-            else:
-                out.append( _ustr(item) )
-        return out
-
-    def asList( self ):
-        """
-        Returns the parse results as a nested list of matching tokens, all converted to strings.
-
-        Example::
-            patt = OneOrMore(Word(alphas))
-            result = patt.parseString("sldkj lsdkj sldkj")
-            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
-            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-            
-            # Use asList() to create an actual list
-            result_list = result.asList()
-            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
-        """
-        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
-
-    def asDict( self ):
-        """
-        Returns the named parse results as a nested dictionary.
-
-        Example::
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-            
-            result = date_str.parseString('12/31/1999')
-            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-            
-            result_dict = result.asDict()
-            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
-            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
-            import json
-            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
-            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
-        """
-        if PY_3:
-            item_fn = self.items
-        else:
-            item_fn = self.iteritems
-            
-        def toItem(obj):
-            if isinstance(obj, ParseResults):
-                if obj.haskeys():
-                    return obj.asDict()
-                else:
-                    return [toItem(v) for v in obj]
-            else:
-                return obj
-                
-        return dict((k,toItem(v)) for k,v in item_fn())
-
-    def copy( self ):
-        """
-        Returns a new copy of a C{ParseResults} object.
-        """
-        ret = ParseResults( self.__toklist )
-        ret.__tokdict = self.__tokdict.copy()
-        ret.__parent = self.__parent
-        ret.__accumNames.update( self.__accumNames )
-        ret.__name = self.__name
-        return ret
-
-    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
-        """
-        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
-        """
-        nl = "\n"
-        out = []
-        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
-                                                            for v in vlist)
-        nextLevelIndent = indent + "  "
-
-        # collapse out indents if formatting is not desired
-        if not formatted:
-            indent = ""
-            nextLevelIndent = ""
-            nl = ""
-
-        selfTag = None
-        if doctag is not None:
-            selfTag = doctag
-        else:
-            if self.__name:
-                selfTag = self.__name
-
-        if not selfTag:
-            if namedItemsOnly:
-                return ""
-            else:
-                selfTag = "ITEM"
-
-        out += [ nl, indent, "<", selfTag, ">" ]
-
-        for i,res in enumerate(self.__toklist):
-            if isinstance(res,ParseResults):
-                if i in namedItems:
-                    out += [ res.asXML(namedItems[i],
-                                        namedItemsOnly and doctag is None,
-                                        nextLevelIndent,
-                                        formatted)]
-                else:
-                    out += [ res.asXML(None,
-                                        namedItemsOnly and doctag is None,
-                                        nextLevelIndent,
-                                        formatted)]
-            else:
-                # individual token, see if there is a name for it
-                resTag = None
-                if i in namedItems:
-                    resTag = namedItems[i]
-                if not resTag:
-                    if namedItemsOnly:
-                        continue
-                    else:
-                        resTag = "ITEM"
-                xmlBodyText = _xml_escape(_ustr(res))
-                out += [ nl, nextLevelIndent, "<", resTag, ">",
-                                                xmlBodyText,
-                                                "" ]
-
-        out += [ nl, indent, "" ]
-        return "".join(out)
-
-    def __lookup(self,sub):
-        for k,vlist in self.__tokdict.items():
-            for v,loc in vlist:
-                if sub is v:
-                    return k
-        return None
-
-    def getName(self):
-        r"""
-        Returns the results name for this token expression. Useful when several 
-        different expressions might match at a particular location.
-
-        Example::
-            integer = Word(nums)
-            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
-            house_number_expr = Suppress('#') + Word(nums, alphanums)
-            user_data = (Group(house_number_expr)("house_number") 
-                        | Group(ssn_expr)("ssn")
-                        | Group(integer)("age"))
-            user_info = OneOrMore(user_data)
-            
-            result = user_info.parseString("22 111-22-3333 #221B")
-            for item in result:
-                print(item.getName(), ':', item[0])
-        prints::
-            age : 22
-            ssn : 111-22-3333
-            house_number : 221B
-        """
-        if self.__name:
-            return self.__name
-        elif self.__parent:
-            par = self.__parent()
-            if par:
-                return par.__lookup(self)
-            else:
-                return None
-        elif (len(self) == 1 and
-               len(self.__tokdict) == 1 and
-               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
-            return next(iter(self.__tokdict.keys()))
-        else:
-            return None
-
-    def dump(self, indent='', depth=0, full=True):
-        """
-        Diagnostic method for listing out the contents of a C{ParseResults}.
-        Accepts an optional C{indent} argument so that this string can be embedded
-        in a nested display of other data.
-
-        Example::
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-            
-            result = date_str.parseString('12/31/1999')
-            print(result.dump())
-        prints::
-            ['12', '/', '31', '/', '1999']
-            - day: 1999
-            - month: 31
-            - year: 12
-        """
-        out = []
-        NL = '\n'
-        out.append( indent+_ustr(self.asList()) )
-        if full:
-            if self.haskeys():
-                items = sorted((str(k), v) for k,v in self.items())
-                for k,v in items:
-                    if out:
-                        out.append(NL)
-                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
-                    if isinstance(v,ParseResults):
-                        if v:
-                            out.append( v.dump(indent,depth+1) )
-                        else:
-                            out.append(_ustr(v))
-                    else:
-                        out.append(repr(v))
-            elif any(isinstance(vv,ParseResults) for vv in self):
-                v = self
-                for i,vv in enumerate(v):
-                    if isinstance(vv,ParseResults):
-                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
-                    else:
-                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
-            
-        return "".join(out)
-
-    def pprint(self, *args, **kwargs):
-        """
-        Pretty-printer for parsed results as a list, using the C{pprint} module.
-        Accepts additional positional or keyword args as defined for the 
-        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
-
-        Example::
-            ident = Word(alphas, alphanums)
-            num = Word(nums)
-            func = Forward()
-            term = ident | num | Group('(' + func + ')')
-            func <<= ident + Group(Optional(delimitedList(term)))
-            result = func.parseString("fna a,b,(fnb c,d,200),100")
-            result.pprint(width=40)
-        prints::
-            ['fna',
-             ['a',
-              'b',
-              ['(', 'fnb', ['c', 'd', '200'], ')'],
-              '100']]
-        """
-        pprint.pprint(self.asList(), *args, **kwargs)
-
-    # add support for pickle protocol
-    def __getstate__(self):
-        return ( self.__toklist,
-                 ( self.__tokdict.copy(),
-                   self.__parent is not None and self.__parent() or None,
-                   self.__accumNames,
-                   self.__name ) )
-
-    def __setstate__(self,state):
-        self.__toklist = state[0]
-        (self.__tokdict,
-         par,
-         inAccumNames,
-         self.__name) = state[1]
-        self.__accumNames = {}
-        self.__accumNames.update(inAccumNames)
-        if par is not None:
-            self.__parent = wkref(par)
-        else:
-            self.__parent = None
-
-    def __getnewargs__(self):
-        return self.__toklist, self.__name, self.__asList, self.__modal
-
-    def __dir__(self):
-        return (dir(type(self)) + list(self.keys()))
-
-MutableMapping.register(ParseResults)
-
-def col (loc,strg):
-    """Returns current column within a string, counting newlines as line separators.
-   The first column is number 1.
-
-   Note: the default parsing behavior is to expand tabs in the input string
-   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
-   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
-   consistent view of the parsed string, the parse location, and line and column
-   positions within the parsed string.
-   """
-    s = strg
-    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
-
-def lineno(loc,strg):
-    """Returns current line number within a string, counting newlines as line separators.
-   The first line is number 1.
-
-   Note: the default parsing behavior is to expand tabs in the input string
-   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
-   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
-   consistent view of the parsed string, the parse location, and line and column
-   positions within the parsed string.
-   """
-    return strg.count("\n",0,loc) + 1
-
-def line( loc, strg ):
-    """Returns the line of text containing loc within a string, counting newlines as line separators.
-       """
-    lastCR = strg.rfind("\n", 0, loc)
-    nextCR = strg.find("\n", loc)
-    if nextCR >= 0:
-        return strg[lastCR+1:nextCR]
-    else:
-        return strg[lastCR+1:]
-
-def _defaultStartDebugAction( instring, loc, expr ):
-    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
-
-def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
-    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
-
-def _defaultExceptionDebugAction( instring, loc, expr, exc ):
-    print ("Exception raised:" + _ustr(exc))
-
-def nullDebugAction(*args):
-    """'Do-nothing' debug action, to suppress debugging output during parsing."""
-    pass
-
-# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
-#~ 'decorator to trim function calls to match the arity of the target'
-#~ def _trim_arity(func, maxargs=3):
-    #~ if func in singleArgBuiltins:
-        #~ return lambda s,l,t: func(t)
-    #~ limit = 0
-    #~ foundArity = False
-    #~ def wrapper(*args):
-        #~ nonlocal limit,foundArity
-        #~ while 1:
-            #~ try:
-                #~ ret = func(*args[limit:])
-                #~ foundArity = True
-                #~ return ret
-            #~ except TypeError:
-                #~ if limit == maxargs or foundArity:
-                    #~ raise
-                #~ limit += 1
-                #~ continue
-    #~ return wrapper
-
-# this version is Python 2.x-3.x cross-compatible
-'decorator to trim function calls to match the arity of the target'
-def _trim_arity(func, maxargs=2):
-    if func in singleArgBuiltins:
-        return lambda s,l,t: func(t)
-    limit = [0]
-    foundArity = [False]
-    
-    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
-    if system_version[:2] >= (3,5):
-        def extract_stack(limit=0):
-            # special handling for Python 3.5.0 - extra deep call stack by 1
-            offset = -3 if system_version == (3,5,0) else -2
-            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
-            return [frame_summary[:2]]
-        def extract_tb(tb, limit=0):
-            frames = traceback.extract_tb(tb, limit=limit)
-            frame_summary = frames[-1]
-            return [frame_summary[:2]]
-    else:
-        extract_stack = traceback.extract_stack
-        extract_tb = traceback.extract_tb
-    
-    # synthesize what would be returned by traceback.extract_stack at the call to 
-    # user's parse action 'func', so that we don't incur call penalty at parse time
-    
-    LINE_DIFF = 6
-    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
-    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
-    this_line = extract_stack(limit=2)[-1]
-    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
-
-    def wrapper(*args):
-        while 1:
-            try:
-                ret = func(*args[limit[0]:])
-                foundArity[0] = True
-                return ret
-            except TypeError:
-                # re-raise TypeErrors if they did not come from our arity testing
-                if foundArity[0]:
-                    raise
-                else:
-                    try:
-                        tb = sys.exc_info()[-1]
-                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
-                            raise
-                    finally:
-                        del tb
-
-                if limit[0] <= maxargs:
-                    limit[0] += 1
-                    continue
-                raise
-
-    # copy func name to wrapper for sensible debug output
-    func_name = ""
-    try:
-        func_name = getattr(func, '__name__', 
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    wrapper.__name__ = func_name
-
-    return wrapper
-
-class ParserElement(object):
-    """Abstract base level parser element class."""
-    DEFAULT_WHITE_CHARS = " \n\t\r"
-    verbose_stacktrace = False
-
-    @staticmethod
-    def setDefaultWhitespaceChars( chars ):
-        r"""
-        Overrides the default whitespace chars
-
-        Example::
-            # default whitespace chars are space, <TAB> and newline
-            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
-            
-            # change to just treat newline as significant
-            ParserElement.setDefaultWhitespaceChars(" \t")
-            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
-        """
-        ParserElement.DEFAULT_WHITE_CHARS = chars
-
-    @staticmethod
-    def inlineLiteralsUsing(cls):
-        """
-        Set class to be used for inclusion of string literals into a parser.
-        
-        Example::
-            # default literal class used is Literal
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-
-            # change to Suppress
-            ParserElement.inlineLiteralsUsing(Suppress)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
-        """
-        ParserElement._literalStringClass = cls
-
-    def __init__( self, savelist=False ):
-        self.parseAction = list()
-        self.failAction = None
-        #~ self.name = ""  # don't define self.name, let subclasses try/except upcall
-        self.strRepr = None
-        self.resultsName = None
-        self.saveAsList = savelist
-        self.skipWhitespace = True
-        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
-        self.copyDefaultWhiteChars = True
-        self.mayReturnEmpty = False # used when checking for left-recursion
-        self.keepTabs = False
-        self.ignoreExprs = list()
-        self.debug = False
-        self.streamlined = False
-        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
-        self.errmsg = ""
-        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
-        self.debugActions = ( None, None, None ) #custom debug actions
-        self.re = None
-        self.callPreparse = True # used to avoid redundant calls to preParse
-        self.callDuringTry = False
-
-    def copy( self ):
-        """
-        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
-        for the same parsing pattern, using copies of the original parse element.
-        
-        Example::
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
-            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
-            
-            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
-        prints::
-            [5120, 100, 655360, 268435456]
-        Equivalent form of C{expr.copy()} is just C{expr()}::
-            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
-        """
-        cpy = copy.copy( self )
-        cpy.parseAction = self.parseAction[:]
-        cpy.ignoreExprs = self.ignoreExprs[:]
-        if self.copyDefaultWhiteChars:
-            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
-        return cpy
-
-    def setName( self, name ):
-        """
-        Define name for this expression, makes debugging and exception messages clearer.
-        
-        Example::
-            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
-            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
-        """
-        self.name = name
-        self.errmsg = "Expected " + self.name
-        if hasattr(self,"exception"):
-            self.exception.msg = self.errmsg
-        return self
-
-    def setResultsName( self, name, listAllMatches=False ):
-        """
-        Define name for referencing matching tokens as a nested attribute
-        of the returned parse results.
-        NOTE: this returns a *copy* of the original C{ParserElement} object;
-        this is so that the client can define a basic element, such as an
-        integer, and reference it in multiple places with different names.
-
-        You can also set results names using the abbreviated syntax,
-        C{expr("name")} in place of C{expr.setResultsName("name")} - 
-        see L{I{__call__}<__call__>}.
-
-        Example::
-            date_str = (integer.setResultsName("year") + '/' 
-                        + integer.setResultsName("month") + '/' 
-                        + integer.setResultsName("day"))
-
-            # equivalent form:
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-        """
-        newself = self.copy()
-        if name.endswith("*"):
-            name = name[:-1]
-            listAllMatches=True
-        newself.resultsName = name
-        newself.modalResults = not listAllMatches
-        return newself
-
-    def setBreak(self,breakFlag = True):
-        """Method to invoke the Python pdb debugger when this element is
-           about to be parsed. Set C{breakFlag} to True to enable, False to
-           disable.
-        """
-        if breakFlag:
-            _parseMethod = self._parse
-            def breaker(instring, loc, doActions=True, callPreParse=True):
-                import pdb
-                pdb.set_trace()
-                return _parseMethod( instring, loc, doActions, callPreParse )
-            breaker._originalParseMethod = _parseMethod
-            self._parse = breaker
-        else:
-            if hasattr(self._parse,"_originalParseMethod"):
-                self._parse = self._parse._originalParseMethod
-        return self
-
-    def setParseAction( self, *fns, **kwargs ):
-        """
-        Define one or more actions to perform when successfully matching parse element definition.
-        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
-        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
-         - s   = the original string being parsed (see note below)
-         - loc = the location of the matching substring
-         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
-        If the functions in fns modify the tokens, they can return them as the return
-        value from fn, and the modified list of tokens will replace the original.
-        Otherwise, fn does not need to return any value.
-
-        Optional keyword arguments:
-         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
-
-        Note: the default parsing behavior is to expand tabs in the input string
-        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
-        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
-        consistent view of the parsed string, the parse location, and line and column
-        positions within the parsed string.
-        
-        Example::
-            integer = Word(nums)
-            date_str = integer + '/' + integer + '/' + integer
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-            # use parse action to convert to ints at parse time
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            date_str = integer + '/' + integer + '/' + integer
-
-            # note that integer fields are now ints, not strings
-            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
-        """
-        self.parseAction = list(map(_trim_arity, list(fns)))
-        self.callDuringTry = kwargs.get("callDuringTry", False)
-        return self
-
-    def addParseAction( self, *fns, **kwargs ):
-        """
-        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
-        
-        See examples in L{I{copy}<copy>}.
-        """
-        self.parseAction += list(map(_trim_arity, list(fns)))
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def addCondition(self, *fns, **kwargs):
-        """Add a boolean predicate function to expression's list of parse actions. See 
-        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
-        functions passed to C{addCondition} need to return boolean success/fail of the condition.
-
-        Optional keyword arguments:
-         - message = define a custom message to be used in the raised exception
-         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
-         
-        Example::
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            year_int = integer.copy()
-            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
-            date_str = year_int + '/' + integer + '/' + integer
-
-            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
-        """
-        msg = kwargs.get("message", "failed user-defined condition")
-        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
-        for fn in fns:
-            # bind the trimmed condition via a default argument so each pa()
-            # keeps its own callable instead of late-binding the loop variable
-            def pa(s, l, t, fn=_trim_arity(fn)):
-                if not bool(fn(s, l, t)):
-                    raise exc_type(s, l, msg)
-            self.parseAction.append(pa)
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def setFailAction( self, fn ):
-        """Define action to perform if parsing fails at this expression.
-           Fail action fn is a callable function that takes the arguments
-           C{fn(s,loc,expr,err)} where:
-            - s = string being parsed
-            - loc = location where expression match was attempted and failed
-            - expr = the parse expression that failed
-            - err = the exception thrown
-           The function returns no value.  It may throw C{L{ParseFatalException}}
-           if it is desired to stop parsing immediately."""
-        self.failAction = fn
-        return self
-
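-    # A minimal usage sketch for setFailAction (editor's illustration, not part
-    # of the original module; report_failure is a hypothetical name, and Word
-    # and nums are the module-level helpers defined elsewhere in this file):
-    #
-    #   def report_failure(s, loc, expr, err):
-    #       print("no match for %s at char %d" % (expr, loc))
-    #
-    #   integer = Word(nums).setFailAction(report_failure)
-    #   integer.parseString("abc")   # prints the message, then raises ParseException
-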
-    def _skipIgnorables( self, instring, loc ):
-        exprsFound = True
-        while exprsFound:
-            exprsFound = False
-            for e in self.ignoreExprs:
-                try:
-                    while 1:
-                        loc,dummy = e._parse( instring, loc )
-                        exprsFound = True
-                except ParseException:
-                    pass
-        return loc
-
-    def preParse( self, instring, loc ):
-        if self.ignoreExprs:
-            loc = self._skipIgnorables( instring, loc )
-
-        if self.skipWhitespace:
-            wt = self.whiteChars
-            instrlen = len(instring)
-            while loc < instrlen and instring[loc] in wt:
-                loc += 1
-
-        return loc
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        return loc, []
-
-    def postParse( self, instring, loc, tokenlist ):
-        return tokenlist
-
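-    # Taken together, the three hooks above form the per-element match pipeline
-    # driven by _parseNoCache() below: preParse() skips ignorable expressions and
-    # leading whitespace, parseImpl() performs the actual match, and postParse()
-    # may rewrite the raw token list before any parse actions run.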
-    #~ @profile
-    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
-        debugging = ( self.debug ) #and doActions )
-
-        if debugging or self.failAction:
-            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
-            if (self.debugActions[0] ):
-                self.debugActions[0]( instring, loc, self )
-            if callPreParse and self.callPreparse:
-                preloc = self.preParse( instring, loc )
-            else:
-                preloc = loc
-            tokensStart = preloc
-            try:
-                try:
-                    loc,tokens = self.parseImpl( instring, preloc, doActions )
-                except IndexError:
-                    raise ParseException( instring, len(instring), self.errmsg, self )
-            except ParseBaseException as err:
-                #~ print ("Exception raised:", err)
-                if self.debugActions[2]:
-                    self.debugActions[2]( instring, tokensStart, self, err )
-                if self.failAction:
-                    self.failAction( instring, tokensStart, self, err )
-                raise
-        else:
-            if callPreParse and self.callPreparse:
-                preloc = self.preParse( instring, loc )
-            else:
-                preloc = loc
-            tokensStart = preloc
-            if self.mayIndexError or preloc >= len(instring):
-                try:
-                    loc,tokens = self.parseImpl( instring, preloc, doActions )
-                except IndexError:
-                    raise ParseException( instring, len(instring), self.errmsg, self )
-            else:
-                loc,tokens = self.parseImpl( instring, preloc, doActions )
-
-        tokens = self.postParse( instring, loc, tokens )
-
-        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
-        if self.parseAction and (doActions or self.callDuringTry):
-            if debugging:
-                try:
-                    for fn in self.parseAction:
-                        tokens = fn( instring, tokensStart, retTokens )
-                        if tokens is not None:
-                            retTokens = ParseResults( tokens,
-                                                      self.resultsName,
-                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
-                                                      modal=self.modalResults )
-                except ParseBaseException as err:
-                    #~ print "Exception raised in user parse action:", err
-                    if (self.debugActions[2] ):
-                        self.debugActions[2]( instring, tokensStart, self, err )
-                    raise
-            else:
-                for fn in self.parseAction:
-                    tokens = fn( instring, tokensStart, retTokens )
-                    if tokens is not None:
-                        retTokens = ParseResults( tokens,
-                                                  self.resultsName,
-                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
-                                                  modal=self.modalResults )
-        if debugging:
-            #~ print ("Matched",self,"->",retTokens.asList())
-            if (self.debugActions[1] ):
-                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
-
-        return loc, retTokens
-
-    def tryParse( self, instring, loc ):
-        try:
-            return self._parse( instring, loc, doActions=False )[0]
-        except ParseFatalException:
-            raise ParseException( instring, loc, self.errmsg, self)
-    
-    def canParseNext(self, instring, loc):
-        try:
-            self.tryParse(instring, loc)
-        except (ParseException, IndexError):
-            return False
-        else:
-            return True
-
-    class _UnboundedCache(object):
-        def __init__(self):
-            cache = {}
-            self.not_in_cache = not_in_cache = object()
-
-            def get(self, key):
-                return cache.get(key, not_in_cache)
-
-            def set(self, key, value):
-                cache[key] = value
-
-            def clear(self):
-                cache.clear()
-                
-            def cache_len(self):
-                return len(cache)
-
-            self.get = types.MethodType(get, self)
-            self.set = types.MethodType(set, self)
-            self.clear = types.MethodType(clear, self)
-            self.__len__ = types.MethodType(cache_len, self)
-
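-    # Note on the cache classes: storage lives in closure variables rather than
-    # instance attributes, and the accessors are bound per-instance with
-    # types.MethodType, which keeps lookups on the hot parsing path cheap.
-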
-    if _OrderedDict is not None:
-        class _FifoCache(object):
-            def __init__(self, size):
-                self.not_in_cache = not_in_cache = object()
-
-                cache = _OrderedDict()
-
-                def get(self, key):
-                    return cache.get(key, not_in_cache)
-
-                def set(self, key, value):
-                    cache[key] = value
-                    while len(cache) > size:
-                        try:
-                            cache.popitem(False)
-                        except KeyError:
-                            pass
-
-                def clear(self):
-                    cache.clear()
-
-                def cache_len(self):
-                    return len(cache)
-
-                self.get = types.MethodType(get, self)
-                self.set = types.MethodType(set, self)
-                self.clear = types.MethodType(clear, self)
-                self.__len__ = types.MethodType(cache_len, self)
-
-    else:
-        class _FifoCache(object):
-            def __init__(self, size):
-                self.not_in_cache = not_in_cache = object()
-
-                cache = {}
-                key_fifo = collections.deque([], size)
-
-                def get(self, key):
-                    return cache.get(key, not_in_cache)
-
-                def set(self, key, value):
-                    cache[key] = value
-                    while len(key_fifo) > size:
-                        cache.pop(key_fifo.popleft(), None)
-                    key_fifo.append(key)
-
-                def clear(self):
-                    cache.clear()
-                    key_fifo.clear()
-
-                def cache_len(self):
-                    return len(cache)
-
-                self.get = types.MethodType(get, self)
-                self.set = types.MethodType(set, self)
-                self.clear = types.MethodType(clear, self)
-                self.__len__ = types.MethodType(cache_len, self)
-
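-    # A standalone sketch of the FIFO eviction used by the fallback cache above
-    # (editor's illustration; stdlib only, keys and values are arbitrary):
-    #
-    #   import collections
-    #   cache, key_fifo = {}, collections.deque()
-    #   def put(key, value, size=2):
-    #       cache[key] = value
-    #       key_fifo.append(key)
-    #       while len(key_fifo) > size:
-    #           cache.pop(key_fifo.popleft(), None)
-    #   put("a", 1); put("b", 2); put("c", 3)
-    #   sorted(cache)   # -> ['b', 'c']  ("a" was evicted first-in, first-out)
-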
-    # argument cache for optimizing repeated calls when backtracking through recursive expressions
-    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
-    packrat_cache_lock = RLock()
-    packrat_cache_stats = [0, 0]
-
-    # this method gets repeatedly called during backtracking with the same arguments -
-    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
-    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
-        HIT, MISS = 0, 1
-        lookup = (self, instring, loc, callPreParse, doActions)
-        with ParserElement.packrat_cache_lock:
-            cache = ParserElement.packrat_cache
-            value = cache.get(lookup)
-            if value is cache.not_in_cache:
-                ParserElement.packrat_cache_stats[MISS] += 1
-                try:
-                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
-                except ParseBaseException as pe:
-                    # cache a copy of the exception, without the traceback
-                    cache.set(lookup, pe.__class__(*pe.args))
-                    raise
-                else:
-                    cache.set(lookup, (value[0], value[1].copy()))
-                    return value
-            else:
-                ParserElement.packrat_cache_stats[HIT] += 1
-                if isinstance(value, Exception):
-                    raise value
-                return (value[0], value[1].copy())
-
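-    # Parsing is uncached by default; enablePackrat() below rebinds _parse to
-    # _parseCache to switch memoization on.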
-    _parse = _parseNoCache
-
-    @staticmethod
-    def resetCache():
-        ParserElement.packrat_cache.clear()
-        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
-
-    _packratEnabled = False
-    @staticmethod
-    def enablePackrat(cache_size_limit=128):
-        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
-           Repeated parse attempts at the same string location (which happens
-           often in many complex grammars) can immediately return a cached value,
-           instead of re-executing parsing/validating code.  Memoizing is applied to
-           both valid results and parsing exceptions.
-           
-           Parameters:
-            - cache_size_limit - (default=C{128}) - if an integer value is provided,
-              it will limit the size of the packrat cache; if None is passed, the
-              cache size will be unbounded; if 0 is passed, the cache is
-              effectively disabled.
-            
-           This speedup may break existing programs that use parse actions that
-           have side-effects.  For this reason, packrat parsing is disabled when
-           you first import pyparsing.  To activate the packrat feature, your
-           program must call the class method C{ParserElement.enablePackrat()}.  If
-           your program uses C{psyco} to "compile as you go", you must call
-           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
-           Python will crash.  For best results, call C{enablePackrat()} immediately
-           after importing pyparsing.
-           
-           Example::
-               import pyparsing
-               pyparsing.ParserElement.enablePackrat()
-        """
-        if not ParserElement._packratEnabled:
-            ParserElement._packratEnabled = True
-            if cache_size_limit is None:
-                ParserElement.packrat_cache = ParserElement._UnboundedCache()
-            else:
-                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
-            ParserElement._parse = ParserElement._parseCache
-
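-    # Editor's sketch: the effect of packrat parsing can be observed through the
-    # class-level counters above (indices follow HIT, MISS in _parseCache):
-    #
-    #   import pyparsing
-    #   pyparsing.ParserElement.enablePackrat()
-    #   # ... build a grammar and call parseString() on some input ...
-    #   hits, misses = pyparsing.ParserElement.packrat_cache_stats
-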
-    def parseString( self, instring, parseAll=False ):
-        """
-        Execute the parse expression with the given string.
-        This is the main interface to the client code, once the complete
-        expression has been built.
-
-        If you want the grammar to require that the entire input string be
-        successfully parsed, then set C{parseAll} to True (equivalent to ending
-        the grammar with C{L{StringEnd()}}).
-
-        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
-        in order to report proper column numbers in parse actions.
-        If the input string contains tabs and
-        the grammar uses parse actions that use the C{loc} argument to index into the
-        string being parsed, you can ensure you have a consistent view of the input
-        string by:
-         - calling C{parseWithTabs} on your grammar before calling C{parseString}
-           (see L{I{parseWithTabs}})
-         - define your parse action using the full C{(s,loc,toks)} signature, and
-           reference the input string using the parse action's C{s} argument
-         - explicitly expand the tabs in your input string before calling
-           C{parseString}
-        
-        Example::
-            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
-            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
-        """
-        ParserElement.resetCache()
-        if not self.streamlined:
-            self.streamline()
-            #~ self.saveAsList = True
-        for e in self.ignoreExprs:
-            e.streamline()
-        if not self.keepTabs:
-            instring = instring.expandtabs()
-        try:
-            loc, tokens = self._parse( instring, 0 )
-            if parseAll:
-                loc = self.preParse( instring, loc )
-                se = Empty() + StringEnd()
-                se._parse( instring, loc )
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-        else:
-            return tokens
-
-    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
-        """
-        Scan the input string for expression matches.  Each match will return the
-        matching tokens, start location, and end location.  May be called with optional
-        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
-        C{overlap} is specified, then overlapping matches will be reported.
-
-        Note that the start and end locations are reported relative to the string
-        being parsed.  See L{I{parseString}} for more information on parsing
-        strings with embedded tabs.
-
-        Example::
-            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
-            print(source)
-            for tokens,start,end in Word(alphas).scanString(source):
-                print(' '*start + '^'*(end-start))
-                print(' '*start + tokens[0])
-        
-        prints::
-        
-            sldjf123lsdjjkf345sldkjf879lkjsfd987
-            ^^^^^
-            sldjf
-                    ^^^^^^^
-                    lsdjjkf
-                              ^^^^^^
-                              sldkjf
-                                       ^^^^^^
-                                       lkjsfd
-        """
-        if not self.streamlined:
-            self.streamline()
-        for e in self.ignoreExprs:
-            e.streamline()
-
-        if not self.keepTabs:
-            instring = _ustr(instring).expandtabs()
-        instrlen = len(instring)
-        loc = 0
-        preparseFn = self.preParse
-        parseFn = self._parse
-        ParserElement.resetCache()
-        matches = 0
-        try:
-            while loc <= instrlen and matches < maxMatches:
-                try:
-                    preloc = preparseFn( instring, loc )
-                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
-                except ParseException:
-                    loc = preloc+1
-                else:
-                    if nextLoc > loc:
-                        matches += 1
-                        yield tokens, preloc, nextLoc
-                        if overlap:
-                            nextloc = preparseFn( instring, loc )
-                            if nextloc > loc:
-                                loc = nextLoc
-                            else:
-                                loc += 1
-                        else:
-                            loc = nextLoc
-                    else:
-                        loc = preloc+1
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
-    def transformString( self, instring ):
-        """
-        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
-        be returned from a parse action.  To use C{transformString}, define a grammar and
-        attach a parse action to it that modifies the returned token list.
-        Invoking C{transformString()} on a target string will then scan for matches,
-        and replace the matched text patterns according to the logic in the parse
-        action.  C{transformString()} returns the resulting transformed string.
-        
-        Example::
-            wd = Word(alphas)
-            wd.setParseAction(lambda toks: toks[0].title())
-            
-            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
-        Prints::
-            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
-        """
-        out = []
-        lastE = 0
-        # force preservation of s, to minimize unwanted transformation of string, and to
-        # keep string locs straight between transformString and scanString
-        self.keepTabs = True
-        try:
-            for t,s,e in self.scanString( instring ):
-                out.append( instring[lastE:s] )
-                if t:
-                    if isinstance(t,ParseResults):
-                        out += t.asList()
-                    elif isinstance(t,list):
-                        out += t
-                    else:
-                        out.append(t)
-                lastE = e
-            out.append(instring[lastE:])
-            out = [o for o in out if o]
-            return "".join(map(_ustr,_flatten(out)))
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
-    def searchString( self, instring, maxMatches=_MAX_INT ):
-        """
-        Another extension to C{L{scanString}}, simplifying the access to the tokens found
-        to match the given parse expression.  May be called with optional
-        C{maxMatches} argument, to clip searching after 'n' matches are found.
-        
-        Example::
-            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
-            cap_word = Word(alphas.upper(), alphas.lower())
-            
-            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
-
-            # the sum() builtin can be used to merge results into a single ParseResults object
-            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
-        prints::
-            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
-            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
-        """
-        try:
-            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
-    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
-        """
-        Generator method to split a string using the given expression as a separator.
-        May be called with optional C{maxsplit} argument, to limit the number of splits;
-        and the optional C{includeSeparators} argument (default=C{False}), if the separating
-        matching text should be included in the split results.
-        
-        Example::        
-            punc = oneOf(list(".,;:/-!?"))
-            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
-        prints::
-            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
-        """
-        splits = 0
-        last = 0
-        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
-            yield instring[last:s]
-            if includeSeparators:
-                yield t[0]
-            last = e
-        yield instring[last:]
-
-    def __add__(self, other ):
-        """
-        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
-        converts them to L{Literal}s by default.
-        
-        Example::
-            greet = Word(alphas) + "," + Word(alphas) + "!"
-            hello = "Hello, World!"
-            print (hello, "->", greet.parseString(hello))
-        Prints::
-            Hello, World! -> ['Hello', ',', 'World', '!']
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return And( [ self, other ] )
-
-    def __radd__(self, other ):
-        """
-        Implementation of + operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other + self
-
-    def __sub__(self, other):
-        """
-        Implementation of - operator, returns C{L{And}} with error stop
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return self + And._ErrorStop() + other
-
-    def __rsub__(self, other ):
-        """
-        Implementation of - operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other - self
-
-    def __mul__(self,other):
-        """
-        Implementation of * operator, allows use of C{expr * 3} in place of
-        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
-        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
-        may also include C{None} as in:
-         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
-              to C{expr*n + L{ZeroOrMore}(expr)}
-              (read as "at least n instances of C{expr}")
-         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
-              (read as "0 to n instances of C{expr}")
-         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
-         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
-
-        Note that C{expr*(None,n)} does not raise an exception if
-        more than n exprs exist in the input stream; that is,
-        C{expr*(None,n)} does not enforce a maximum number of expr
-        occurrences.  If this behavior is desired, then write
-        C{expr*(None,n) + ~expr}
-        """
-        if isinstance(other,int):
-            minElements, optElements = other,0
-        elif isinstance(other,tuple):
-            other = (other + (None, None))[:2]
-            if other[0] is None:
-                other = (0, other[1])
-            if isinstance(other[0],int) and other[1] is None:
-                if other[0] == 0:
-                    return ZeroOrMore(self)
-                if other[0] == 1:
-                    return OneOrMore(self)
-                else:
-                    return self*other[0] + ZeroOrMore(self)
-            elif isinstance(other[0],int) and isinstance(other[1],int):
-                minElements, optElements = other
-                optElements -= minElements
-            else:
-                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
-        else:
-            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
-
-        if minElements < 0:
-            raise ValueError("cannot multiply ParserElement by negative value")
-        if optElements < 0:
-            raise ValueError("second tuple value must be greater or equal to first tuple value")
-        if minElements == optElements == 0:
-            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
-
-        if (optElements):
-            def makeOptionalList(n):
-                if n>1:
-                    return Optional(self + makeOptionalList(n-1))
-                else:
-                    return Optional(self)
-            if minElements:
-                if minElements == 1:
-                    ret = self + makeOptionalList(optElements)
-                else:
-                    ret = And([self]*minElements) + makeOptionalList(optElements)
-            else:
-                ret = makeOptionalList(optElements)
-        else:
-            if minElements == 1:
-                ret = self
-            else:
-                ret = And([self]*minElements)
-        return ret
-
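-    # A short sketch of the multiplication rules documented above (editor's
-    # illustration; assumes the module-level Word):
-    #
-    #   ab = Word("ab")
-    #   (ab * 2).parseString("ab ab")   # -> ['ab', 'ab']
-    #   ab * (1, None)                  # same as OneOrMore(ab)
-    #   ab * (None, 3)                  # same as ab*(0,3); the max is not enforced
-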
-    def __rmul__(self, other):
-        return self.__mul__(other)
-
-    def __or__(self, other ):
-        """
-        Implementation of | operator - returns C{L{MatchFirst}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return MatchFirst( [ self, other ] )
-
-    def __ror__(self, other ):
-        """
-        Implementation of | operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other | self
-
-    def __xor__(self, other ):
-        """
-        Implementation of ^ operator - returns C{L{Or}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return Or( [ self, other ] )
-
-    def __rxor__(self, other ):
-        """
-        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other ^ self
-
-    def __and__(self, other ):
-        """
-        Implementation of & operator - returns C{L{Each}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return Each( [ self, other ] )
-
-    def __rand__(self, other ):
-        """
-        Implementation of & operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other & self
-
-    def __invert__( self ):
-        """
-        Implementation of ~ operator - returns C{L{NotAny}}
-        """
-        return NotAny( self )
-
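-    # A compact contrast of the overloaded combinators above (editor's sketch,
-    # expressions only; w and n are hypothetical names):
-    #
-    #   w, n = Word(alphas), Word(nums)
-    #   w + n    # And: w then n, in order
-    #   w - n    # And with error stop: once w matches, n must follow
-    #   w | n    # MatchFirst: first alternative to match wins
-    #   w ^ n    # Or: longest of all matching alternatives wins
-    #   w & n    # Each: both must be present, in any order
-    #   ~w       # NotAny: negative lookahead, consumes no input
-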
-    def __call__(self, name=None):
-        """
-        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
-        
-        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
-        passed as C{True}.
-           
-        If C{name} is omitted, same as calling C{L{copy}}.
-
-        Example::
-            # these are equivalent
-            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
-            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
-        """
-        if name is not None:
-            return self.setResultsName(name)
-        else:
-            return self.copy()
-
-    def suppress( self ):
-        """
-        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
-        cluttering up returned output.
-        """
-        return Suppress( self )
-
-    def leaveWhitespace( self ):
-        """
-        Disables the skipping of whitespace before matching the characters in the
-        C{ParserElement}'s defined pattern.  This is normally only used internally by
-        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-        """
-        self.skipWhitespace = False
-        return self
-
-    def setWhitespaceChars( self, chars ):
-        """
-        Overrides the default whitespace chars
-        """
-        self.skipWhitespace = True
-        self.whiteChars = chars
-        self.copyDefaultWhiteChars = False
-        return self
-
-    def parseWithTabs( self ):
-        """
-        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
-        Must be called before C{parseString} when the input grammar contains elements that
-        match C{<TAB>} characters.
-        """
-        self.keepTabs = True
-        return self
-
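-    # A minimal sketch of when parseWithTabs matters (editor's illustration):
-    #
-    #   tabbed = Literal("a\tb").parseWithTabs()
-    #   tabbed.parseString("a\tb")   # matches the literal tab character
-    #   # without parseWithTabs(), parseString() expands the tab to spaces
-    #   # first, and the same Literal fails to match
-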
-    def ignore( self, other ):
-        """
-        Define expression to be ignored (e.g., comments) while doing pattern
-        matching; may be called repeatedly, to define multiple comment or other
-        ignorable patterns.
-        
-        Example::
-            patt = OneOrMore(Word(alphas))
-            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
-            
-            patt.ignore(cStyleComment)
-            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
-        """
-        if isinstance(other, basestring):
-            other = Suppress(other)
-
-        if isinstance( other, Suppress ):
-            if other not in self.ignoreExprs:
-                self.ignoreExprs.append(other)
-        else:
-            self.ignoreExprs.append( Suppress( other.copy() ) )
-        return self
-
-    def setDebugActions( self, startAction, successAction, exceptionAction ):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        """
-        self.debugActions = (startAction or _defaultStartDebugAction,
-                             successAction or _defaultSuccessDebugAction,
-                             exceptionAction or _defaultExceptionDebugAction)
-        self.debug = True
-        return self
-
-    def setDebug( self, flag=True ):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        Set C{flag} to True to enable, False to disable.
-
-        Example::
-            wd = Word(alphas).setName("alphaword")
-            integer = Word(nums).setName("numword")
-            term = wd | integer
-            
-            # turn on debugging for wd
-            wd.setDebug()
-
-            OneOrMore(term).parseString("abc 123 xyz 890")
-        
-        prints::
-            Match alphaword at loc 0(1,1)
-            Matched alphaword -> ['abc']
-            Match alphaword at loc 3(1,4)
-            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
-            Match alphaword at loc 7(1,8)
-            Matched alphaword -> ['xyz']
-            Match alphaword at loc 11(1,12)
-            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
-            Match alphaword at loc 15(1,16)
-            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
-        The output shown is that produced by the default debug actions - custom debug actions can be
-        specified using L{setDebugActions}. Prior to attempting
-        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
-        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
-        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
-        which makes debugging and exception messages easier to understand - for instance, the default
-        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
-        """
-        if flag:
-            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
-        else:
-            self.debug = False
-        return self
-
-    def __str__( self ):
-        return self.name
-
-    def __repr__( self ):
-        return _ustr(self)
-
-    def streamline( self ):
-        self.streamlined = True
-        self.strRepr = None
-        return self
-
-    def checkRecursion( self, parseElementList ):
-        pass
-
-    def validate( self, validateTrace=[] ):
-        """
-        Check defined expressions for valid structure, check for infinite recursive definitions.
-        """
-        self.checkRecursion( [] )
-
-    def parseFile( self, file_or_filename, parseAll=False ):
-        """
-        Execute the parse expression on the given file or filename.
-        If a filename is specified (instead of a file object),
-        the entire file is opened, read, and closed before parsing.
-        """
-        try:
-            file_contents = file_or_filename.read()
-        except AttributeError:
-            with open(file_or_filename, "r") as f:
-                file_contents = f.read()
-        try:
-            return self.parseString(file_contents, parseAll)
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
-    def __eq__(self,other):
-        if isinstance(other, ParserElement):
-            return self is other or vars(self) == vars(other)
-        elif isinstance(other, basestring):
-            return self.matches(other)
-        else:
-            return super(ParserElement,self)==other
-
-    def __ne__(self,other):
-        return not (self == other)
-
-    def __hash__(self):
-        return hash(id(self))
-
-    def __req__(self,other):
-        return self == other
-
-    def __rne__(self,other):
-        return not (self == other)
-
-    def matches(self, testString, parseAll=True):
-        """
-        Method for quick testing of a parser against a test string. Good for simple 
-        inline microtests of sub expressions while building up a larger parser.
-           
-        Parameters:
-         - testString - to test against this expression for a match
-         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
-            
-        Example::
-            expr = Word(nums)
-            assert expr.matches("100")
-        """
-        try:
-            self.parseString(_ustr(testString), parseAll=parseAll)
-            return True
-        except ParseBaseException:
-            return False
-                
-    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
-        """
-        Execute the parse expression on a series of test strings, showing each
-        test, the parsed results or where the parse failed. Quick and easy way to
-        run a parse expression against a list of sample strings.
-           
-        Parameters:
-         - tests - a list of separate test strings, or a multiline string of test strings
-         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
-         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
-              string; pass None to disable comment filtering
-         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
-              if False, only dump nested list
-         - printResults - (default=C{True}) prints test output to stdout
-         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
-
-        Returns: a (success, results) tuple, where success indicates that all tests succeeded
-        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
-        test's output
-        
-        Example::
-            number_expr = pyparsing_common.number.copy()
-
-            result = number_expr.runTests('''
-                # unsigned integer
-                100
-                # negative integer
-                -100
-                # float with scientific notation
-                6.02e23
-                # integer with scientific notation
-                1e-12
-                ''')
-            print("Success" if result[0] else "Failed!")
-
-            result = number_expr.runTests('''
-                # stray character
-                100Z
-                # missing leading digit before '.'
-                -.100
-                # too many '.'
-                3.14.159
-                ''', failureTests=True)
-            print("Success" if result[0] else "Failed!")
-        prints::
-            # unsigned integer
-            100
-            [100]
-
-            # negative integer
-            -100
-            [-100]
-
-            # float with scientific notation
-            6.02e23
-            [6.02e+23]
-
-            # integer with scientific notation
-            1e-12
-            [1e-12]
-
-            Success
-            
-            # stray character
-            100Z
-               ^
-            FAIL: Expected end of text (at char 3), (line:1, col:4)
-
-            # missing leading digit before '.'
-            -.100
-            ^
-            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
-            # too many '.'
-            3.14.159
-                ^
-            FAIL: Expected end of text (at char 4), (line:1, col:5)
-
-            Success
-
-        Each test string must be on a single line. If you want to test a string that spans multiple
-        lines, create a test like this::
-
-            expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
-        
-        (Note that this is a raw string literal, you must include the leading 'r'.)
-        """
-        if isinstance(tests, basestring):
-            tests = list(map(str.strip, tests.rstrip().splitlines()))
-        if isinstance(comment, basestring):
-            comment = Literal(comment)
-        allResults = []
-        comments = []
-        success = True
-        for t in tests:
-            if comment is not None and comment.matches(t, False) or comments and not t:
-                comments.append(t)
-                continue
-            if not t:
-                continue
-            out = ['\n'.join(comments), t]
-            comments = []
-            try:
-                t = t.replace(r'\n','\n')
-                result = self.parseString(t, parseAll=parseAll)
-                out.append(result.dump(full=fullDump))
-                success = success and not failureTests
-            except ParseBaseException as pe:
-                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
-                if '\n' in t:
-                    out.append(line(pe.loc, t))
-                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
-                else:
-                    out.append(' '*pe.loc + '^' + fatal)
-                out.append("FAIL: " + str(pe))
-                success = success and failureTests
-                result = pe
-            except Exception as exc:
-                out.append("FAIL-EXCEPTION: " + str(exc))
-                success = success and failureTests
-                result = exc
-
-            if printResults:
-                if fullDump:
-                    out.append('')
-                print('\n'.join(out))
-
-            allResults.append((t, result))
-        
-        return success, allResults
-
-        
-class Token(ParserElement):
-    """
-    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
-    """
-    def __init__( self ):
-        super(Token,self).__init__( savelist=False )
-
-
-class Empty(Token):
-    """
-    An empty token, will always match.
-    """
-    def __init__( self ):
-        super(Empty,self).__init__()
-        self.name = "Empty"
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-
-class NoMatch(Token):
-    """
-    A token that will never match.
-    """
-    def __init__( self ):
-        super(NoMatch,self).__init__()
-        self.name = "NoMatch"
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.errmsg = "Unmatchable token"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
-    """
-    Token to exactly match a specified string.
-    
-    Example::
-        Literal('blah').parseString('blah')  # -> ['blah']
-        Literal('blah').parseString('blahfooblah')  # -> ['blah']
-        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
-    
-    For case-insensitive matching, use L{CaselessLiteral}.
-    
-    For keyword matching (force word break before and after the matched string),
-    use L{Keyword} or L{CaselessKeyword}.
-    """
-    def __init__( self, matchString ):
-        super(Literal,self).__init__()
-        self.match = matchString
-        self.matchLen = len(matchString)
-        try:
-            self.firstMatchChar = matchString[0]
-        except IndexError:
-            warnings.warn("null string passed to Literal; use Empty() instead",
-                            SyntaxWarning, stacklevel=2)
-            self.__class__ = Empty
-        self.name = '"%s"' % _ustr(self.match)
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-
-    # Performance tuning: this routine gets called a *lot*
-    # if this is a single character match string  and the first character matches,
-    # short-circuit as quickly as possible, and avoid calling startswith
-    #~ @profile
-    def parseImpl( self, instring, loc, doActions=True ):
-        if (instring[loc] == self.firstMatchChar and
-            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
-            return loc+self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-_L = Literal
-ParserElement._literalStringClass = Literal
-
-class Keyword(Token):
-    """
-    Token to exactly match a specified string as a keyword, that is, it must be
-    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
-     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
-     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
-    Accepts two optional constructor arguments in addition to the keyword string:
-     - C{identChars} is a string of characters that would be valid identifier characters,
-          defaulting to all alphanumerics + "_" and "$"
-     - C{caseless} allows case-insensitive matching, default is C{False}.
-       
-    Example::
-        Keyword("start").parseString("start")  # -> ['start']
-        Keyword("start").parseString("starting")  # -> Exception
-
-    For case-insensitive matching, use L{CaselessKeyword}.
-    """
-    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
-
-    def __init__( self, matchString, identChars=None, caseless=False ):
-        super(Keyword,self).__init__()
-        if identChars is None:
-            identChars = Keyword.DEFAULT_KEYWORD_CHARS
-        self.match = matchString
-        self.matchLen = len(matchString)
-        try:
-            self.firstMatchChar = matchString[0]
-        except IndexError:
-            warnings.warn("null string passed to Keyword; use Empty() instead",
-                            SyntaxWarning, stacklevel=2)
-        self.name = '"%s"' % self.match
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-        self.caseless = caseless
-        if caseless:
-            self.caselessmatch = matchString.upper()
-            identChars = identChars.upper()
-        self.identChars = set(identChars)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.caseless:
-            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
-                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
-                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
-                return loc+self.matchLen, self.match
-        else:
-            if (instring[loc] == self.firstMatchChar and
-                (self.matchLen==1 or instring.startswith(self.match,loc)) and
-                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
-                (loc == 0 or instring[loc-1] not in self.identChars) ):
-                return loc+self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-    def copy(self):
-        c = super(Keyword,self).copy()
-        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
-        return c
-
-    @staticmethod
-    def setDefaultKeywordChars( chars ):
-        """Overrides the default Keyword chars
-        """
-        Keyword.DEFAULT_KEYWORD_CHARS = chars
-
-class CaselessLiteral(Literal):
-    """
-    Token to match a specified string, ignoring case of letters.
-    Note: the matched results will always be in the case of the given
-    match string, NOT the case of the input text.
-
-    Example::
-        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
-        
-    (Contrast with example for L{CaselessKeyword}.)
-    """
-    def __init__( self, matchString ):
-        super(CaselessLiteral,self).__init__( matchString.upper() )
-        # Preserve the defining literal.
-        self.returnString = matchString
-        self.name = "'%s'" % self.returnString
-        self.errmsg = "Expected " + self.name
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if instring[ loc:loc+self.matchLen ].upper() == self.match:
-            return loc+self.matchLen, self.returnString
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class CaselessKeyword(Keyword):
-    """
-    Caseless version of L{Keyword}.
-
-    Example::
-        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
-        
-    (Contrast with example for L{CaselessLiteral}.)
-    """
-    def __init__( self, matchString, identChars=None ):
-        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
-             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
-            return loc+self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class CloseMatch(Token):
-    """
-    A variation on L{Literal} which matches "close" matches, that is, 
-    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
-     - C{match_string} - string to be matched
-     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
-    
-    The results from a successful parse will contain the matched text from the input string and the following named results:
-     - C{mismatches} - a list of the positions within the match_string where mismatches were found
-     - C{original} - the original match_string used to compare against the input string
-    
-    If C{mismatches} is an empty list, then the match was an exact match.
-    
-    Example::
-        patt = CloseMatch("ATCATCGAATGGA")
-        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
-        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
-        # exact match
-        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
-        # close match allowing up to 2 mismatches
-        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
-        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
-    """
-    def __init__(self, match_string, maxMismatches=1):
-        super(CloseMatch,self).__init__()
-        self.name = match_string
-        self.match_string = match_string
-        self.maxMismatches = maxMismatches
-        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
-        self.mayIndexError = False
-        self.mayReturnEmpty = False
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        start = loc
-        instrlen = len(instring)
-        maxloc = start + len(self.match_string)
-
-        if maxloc <= instrlen:
-            match_string = self.match_string
-            match_stringloc = 0
-            mismatches = []
-            maxMismatches = self.maxMismatches
-
-            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
-                src,mat = s_m
-                if src != mat:
-                    mismatches.append(match_stringloc)
-                    if len(mismatches) > maxMismatches:
-                        break
-            else:
-                # advance past the compared region, offset from the match start
-                loc = start + match_stringloc + 1
-                results = ParseResults([instring[start:loc]])
-                results['original'] = self.match_string
-                results['mismatches'] = mismatches
-                return loc, results
-
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
-    """
-    Token for matching words composed of allowed character sets.
-    Defined with string containing all allowed initial characters,
-    an optional string containing allowed body characters (if omitted,
-    defaults to the initial character set), and an optional minimum,
-    maximum, and/or exact length.  The default value for C{min} is 1 (a
-    minimum value < 1 is not valid); the default values for C{max} and C{exact}
-    are 0, meaning no maximum or exact length restriction. An optional
-    C{excludeChars} parameter can list characters that might be found in 
-    the input C{bodyChars} string; useful to define a word of all printables
-    except for one or two characters, for instance.
-    
-    L{srange} is useful for defining custom character set strings for defining 
-    C{Word} expressions, using range notation from regular expression character sets.
-    
-    A common mistake is to use C{Word} to match a specific literal string, as in 
-    C{Word("Address")}. Remember that C{Word} uses the string argument to define
-    I{sets} of matchable characters. This expression would match "Add", "AAA",
-    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
-    To match an exact literal string, use L{Literal} or L{Keyword}.
-
-    pyparsing includes helper strings for building Words:
-     - L{alphas}
-     - L{nums}
-     - L{alphanums}
-     - L{hexnums}
-     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
-     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
-     - L{printables} (any non-whitespace character)
-
-    Example::
-        # a word composed of digits
-        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-        
-        # a word with a leading capital, and zero or more lowercase
-        capital_word = Word(alphas.upper(), alphas.lower())
-
-        # hostnames are alphanumeric, with leading alpha, and '-'
-        hostname = Word(alphas, alphanums+'-')
-        
-        # roman numeral (not a strict parser, accepts invalid mix of characters)
-        roman = Word("IVXLCDM")
-        
-        # any string of non-whitespace characters, except for ','
-        csv_value = Word(printables, excludeChars=",")
-    """
-    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
-        super(Word,self).__init__()
-        if excludeChars:
-            initChars = ''.join(c for c in initChars if c not in excludeChars)
-            if bodyChars:
-                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
-        self.initCharsOrig = initChars
-        self.initChars = set(initChars)
-        if bodyChars :
-            self.bodyCharsOrig = bodyChars
-            self.bodyChars = set(bodyChars)
-        else:
-            self.bodyCharsOrig = initChars
-            self.bodyChars = set(initChars)
-
-        self.maxSpecified = max > 0
-
-        if min < 1:
-            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.asKeyword = asKeyword
-
-        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
-            if self.bodyCharsOrig == self.initCharsOrig:
-                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
-            elif len(self.initCharsOrig) == 1:
-                self.reString = "%s[%s]*" % \
-                                      (re.escape(self.initCharsOrig),
-                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
-            else:
-                self.reString = "[%s][%s]*" % \
-                                      (_escapeRegexRangeChars(self.initCharsOrig),
-                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
-            if self.asKeyword:
-                self.reString = r"\b"+self.reString+r"\b"
-            try:
-                self.re = re.compile( self.reString )
-            except Exception:
-                self.re = None
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.re:
-            result = self.re.match(instring,loc)
-            if not result:
-                raise ParseException(instring, loc, self.errmsg, self)
-
-            loc = result.end()
-            return loc, result.group()
-
-        if not(instring[ loc ] in self.initChars):
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        instrlen = len(instring)
-        bodychars = self.bodyChars
-        maxloc = start + self.maxLen
-        maxloc = min( maxloc, instrlen )
-        while loc < maxloc and instring[loc] in bodychars:
-            loc += 1
-
-        throwException = False
-        if loc - start < self.minLen:
-            throwException = True
-        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
-            throwException = True
-        if self.asKeyword:
-            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
-                throwException = True
-
-        if throwException:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-    def __str__( self ):
-        try:
-            return super(Word,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-
-            def charsAsStr(s):
-                if len(s)>4:
-                    return s[:4]+"..."
-                else:
-                    return s
-
-            if ( self.initCharsOrig != self.bodyCharsOrig ):
-                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
-            else:
-                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
-
-        return self.strRepr
-
-
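
A minimal usage sketch for the Word class above, assuming the pyparsing 2.x API bundled in this add-on; the names identifier and year are illustrative only:

    from pyparsing import Word, alphas, nums

    # an identifier: leading letter or underscore, then letters, digits, underscores
    identifier = Word(alphas + "_", alphas + nums + "_")
    print(identifier.parseString("user_42abc"))  # -> ['user_42abc']

    # a fixed-width field: exactly four digits
    year = Word(nums, exact=4)
    print(year.parseString("2023-10-12"))        # -> ['2023']
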
-class Regex(Token):
-    r"""
-    Token for matching strings that match a given regular expression.
-    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
-    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as 
-    named parse results.
-
-    Example::
-        realnum = Regex(r"[+-]?\d+\.\d*")
-        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-    """
-    compiledREtype = type(re.compile("[A-Z]"))
-    def __init__( self, pattern, flags=0):
-        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
-        super(Regex,self).__init__()
-
-        if isinstance(pattern, basestring):
-            if not pattern:
-                warnings.warn("null string passed to Regex; use Empty() instead",
-                        SyntaxWarning, stacklevel=2)
-
-            self.pattern = pattern
-            self.flags = flags
-
-            try:
-                self.re = re.compile(self.pattern, self.flags)
-                self.reString = self.pattern
-            except sre_constants.error:
-                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
-                    SyntaxWarning, stacklevel=2)
-                raise
-
-        elif isinstance(pattern, Regex.compiledREtype):
-            self.re = pattern
-            self.pattern = \
-            self.reString = str(pattern)
-            self.flags = flags
-            
-        else:
-            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        result = self.re.match(instring,loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        d = result.groupdict()
-        ret = ParseResults(result.group())
-        if d:
-            for k in d:
-                ret[k] = d[k]
-        return loc,ret
-
-    def __str__( self ):
-        try:
-            return super(Regex,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            self.strRepr = "Re:(%s)" % repr(self.pattern)
-
-        return self.strRepr
-
-
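
A minimal sketch showing how Regex named groups surface as named parse results, assuming the pyparsing 2.x API above; the date pattern mirrors the docstring example:

    from pyparsing import Regex

    # named groups in the pattern become named fields on the ParseResults
    date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
    result = date.parseString("2023-10-12")
    print(result['year'], result['month'], result['day'])  # -> 2023 10 12
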
-class QuotedString(Token):
-    r"""
-    Token for matching strings that are delimited by quoting characters.
-    
-    Defined with the following parameters:
-        - quoteChar - string of one or more characters defining the quote delimiting string
-        - escChar - character to escape quotes, typically backslash (default=C{None})
-        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
-        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
-        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
-        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
-        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
-
-    Example::
-        qs = QuotedString('"')
-        print(qs.searchString('lsjdf "This is the quote" sldjf'))
-        complex_qs = QuotedString('{{', endQuoteChar='}}')
-        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
-        sql_qs = QuotedString('"', escQuote='""')
-        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
-    prints::
-        [['This is the quote']]
-        [['This is the "quote"']]
-        [['This is the quote with "embedded" quotes']]
-    """
-    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
-        super(QuotedString,self).__init__()
-
-        # remove white space from quote chars - wont work anyway
-        quoteChar = quoteChar.strip()
-        if not quoteChar:
-            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
-            raise SyntaxError()
-
-        if endQuoteChar is None:
-            endQuoteChar = quoteChar
-        else:
-            endQuoteChar = endQuoteChar.strip()
-            if not endQuoteChar:
-                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
-                raise SyntaxError()
-
-        self.quoteChar = quoteChar
-        self.quoteCharLen = len(quoteChar)
-        self.firstQuoteChar = quoteChar[0]
-        self.endQuoteChar = endQuoteChar
-        self.endQuoteCharLen = len(endQuoteChar)
-        self.escChar = escChar
-        self.escQuote = escQuote
-        self.unquoteResults = unquoteResults
-        self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
-        if multiline:
-            self.flags = re.MULTILINE | re.DOTALL
-            self.pattern = r'%s(?:[^%s%s]' % \
-                ( re.escape(self.quoteChar),
-                  _escapeRegexRangeChars(self.endQuoteChar[0]),
-                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
-        else:
-            self.flags = 0
-            self.pattern = r'%s(?:[^%s\n\r%s]' % \
-                ( re.escape(self.quoteChar),
-                  _escapeRegexRangeChars(self.endQuoteChar[0]),
-                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
-        if len(self.endQuoteChar) > 1:
-            self.pattern += (
-                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
-                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
-                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
-                )
-        if escQuote:
-            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
-        if escChar:
-            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
-            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
-        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
-
-        try:
-            self.re = re.compile(self.pattern, self.flags)
-            self.reString = self.pattern
-        except sre_constants.error:
-            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
-                SyntaxWarning, stacklevel=2)
-            raise
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result.group()
-
-        if self.unquoteResults:
-
-            # strip off quotes
-            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
-
-            if isinstance(ret,basestring):
-                # replace escaped whitespace
-                if '\\' in ret and self.convertWhitespaceEscapes:
-                    ws_map = {
-                        r'\t' : '\t',
-                        r'\n' : '\n',
-                        r'\f' : '\f',
-                        r'\r' : '\r',
-                    }
-                    for wslit,wschar in ws_map.items():
-                        ret = ret.replace(wslit, wschar)
-
-                # replace escaped characters
-                if self.escChar:
-                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
-                # replace escaped quotes
-                if self.escQuote:
-                    ret = ret.replace(self.escQuote, self.endQuoteChar)
-
-        return loc, ret
-
-    def __str__( self ):
-        try:
-            return super(QuotedString,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
-
-        return self.strRepr
-
-
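
A minimal sketch of QuotedString with a backslash escape character, assuming the pyparsing 2.x API above; the sample text is illustrative:

    from pyparsing import QuotedString

    # single-quoted string with backslash escapes; results are unquoted by default
    sq = QuotedString("'", escChar='\\')
    print(sq.parseString(r"'it\'s here'"))  # -> ["it's here"]
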
-class CharsNotIn(Token):
-    """
-    Token for matching words composed of characters I{not} in a given set (will
-    include whitespace in matched characters if not listed in the provided exclusion set - see example).
-    Defined with string containing all disallowed characters, and an optional
-    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
-    minimum value < 1 is not valid); the default values for C{max} and C{exact}
-    are 0, meaning no maximum or exact length restriction.
-
-    Example::
-        # define a comma-separated-value as anything that is not a ','
-        csv_value = CharsNotIn(',')
-        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
-    prints::
-        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
-    """
-    def __init__( self, notChars, min=1, max=0, exact=0 ):
-        super(CharsNotIn,self).__init__()
-        self.skipWhitespace = False
-        self.notChars = notChars
-
-        if min < 1:
-            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = ( self.minLen == 0 )
-        self.mayIndexError = False
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if instring[loc] in self.notChars:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        notchars = self.notChars
-        maxlen = min( start+self.maxLen, len(instring) )
-        while loc < maxlen and \
-              (instring[loc] not in notchars):
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-    def __str__( self ):
-        try:
-            return super(CharsNotIn, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            if len(self.notChars) > 4:
-                self.strRepr = "!W:(%s...)" % self.notChars[:4]
-            else:
-                self.strRepr = "!W:(%s)" % self.notChars
-
-        return self.strRepr
-
-class White(Token):
-    """
-    Special matching class for matching whitespace.  Normally, whitespace is ignored
-    by pyparsing grammars.  This class is included when some whitespace structures
-    are significant.  Define with a string containing the whitespace characters to be
-    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
-    as defined for the C{L{Word}} class.
-    """
-    whiteStrs = {
-        " " : "",
-        "\t": "",
-        "\n": "",
-        "\r": "",
-        "\f": "",
-        }
-    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
-        super(White,self).__init__()
-        self.matchWhite = ws
-        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
-        #~ self.leaveWhitespace()
-        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
-        self.mayReturnEmpty = True
-        self.errmsg = "Expected " + self.name
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if not(instring[ loc ] in self.matchWhite):
-            raise ParseException(instring, loc, self.errmsg, self)
-        start = loc
-        loc += 1
-        maxloc = start + self.maxLen
-        maxloc = min( maxloc, len(instring) )
-        while loc < maxloc and instring[loc] in self.matchWhite:
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-
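
A minimal sketch of White making a separator significant, assuming the pyparsing 2.x API above; field and row are illustrative names:

    from pyparsing import Word, alphas, White

    # make a tab separator significant instead of letting it be skipped
    field = Word(alphas)
    row = field + White("\t").suppress() + field
    print(row.parseString("alpha\tbeta"))  # -> ['alpha', 'beta']
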
-class _PositionToken(Token):
-    def __init__( self ):
-        super(_PositionToken,self).__init__()
-        self.name=self.__class__.__name__
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-class GoToColumn(_PositionToken):
-    """
-    Token to advance to a specific column of input text; useful for tabular report scraping.
-    """
-    def __init__( self, colno ):
-        super(GoToColumn,self).__init__()
-        self.col = colno
-
-    def preParse( self, instring, loc ):
-        if col(loc,instring) != self.col:
-            instrlen = len(instring)
-            if self.ignoreExprs:
-                loc = self._skipIgnorables( instring, loc )
-            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
-                loc += 1
-        return loc
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        thiscol = col( loc, instring )
-        if thiscol > self.col:
-            raise ParseException( instring, loc, "Text not in expected column", self )
-        newloc = loc + self.col - thiscol
-        ret = instring[ loc: newloc ]
-        return newloc, ret
-
-
-class LineStart(_PositionToken):
-    """
-    Matches if current position is at the beginning of a line within the parse string
-    
-    Example::
-    
-        test = '''\
-        AAA this line
-        AAA and this line
-          AAA but not this one
-        B AAA and definitely not this one
-        '''
-
-        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
-            print(t)
-    
-    Prints::
-        ['AAA', ' this line']
-        ['AAA', ' and this line']    
-
-    """
-    def __init__( self ):
-        super(LineStart,self).__init__()
-        self.errmsg = "Expected start of line"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if col(loc, instring) == 1:
-            return loc, []
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class LineEnd(_PositionToken):
-    """
-    Matches if current position is at the end of a line within the parse string
-    """
-    def __init__( self ):
-        super(LineEnd,self).__init__()
-        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
-        self.errmsg = "Expected end of line"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if loc<len(instring):
-            if instring[loc] == "\n":
-                return loc+1, "\n"
-            else:
-                raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc+1, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-class StringStart(_PositionToken):
-    """
-    Matches if current position is at the beginning of the parse string
-    """
-    def __init__( self ):
-        super(StringStart,self).__init__()
-        self.errmsg = "Expected start of text"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if loc != 0:
-            # see if entire string up to here is just whitespace and ignoreables
-            if loc != self.preParse( instring, 0 ):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-class StringEnd(_PositionToken):
-    """
-    Matches if current position is at the end of the parse string
-    """
-    def __init__( self ):
-        super(StringEnd,self).__init__()
-        self.errmsg = "Expected end of text"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if loc < len(instring):
-            raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc+1, []
-        elif loc > len(instring):
-            return loc, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-class WordStart(_PositionToken):
-    """
-    Matches if the current position is at the beginning of a Word, and
-    is not preceded by any character in a given set of C{wordChars}
-    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
-    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
-    the string being parsed, or at the beginning of a line.
-    """
-    def __init__(self, wordChars = printables):
-        super(WordStart,self).__init__()
-        self.wordChars = set(wordChars)
-        self.errmsg = "Not at the start of a word"
-
-    def parseImpl(self, instring, loc, doActions=True ):
-        if loc != 0:
-            if (instring[loc-1] in self.wordChars or
-                instring[loc] not in self.wordChars):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
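
A minimal sketch pairing WordStart with the WordEnd class defined just below to emulate regex \b, assuming the pyparsing 2.x API; the sample text is illustrative:

    from pyparsing import WordStart, WordEnd, alphanums

    # match 'cat' only as a whole word, not inside 'catalog' or 'concat'
    whole_cat = WordStart(alphanums) + "cat" + WordEnd(alphanums)
    print(whole_cat.searchString("cat catalog concat cat"))  # -> [['cat'], ['cat']]
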
-class WordEnd(_PositionToken):
-    """
-    Matches if the current position is at the end of a Word, and
-    is not followed by any character in a given set of C{wordChars}
-    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
-    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
-    the string being parsed, or at the end of a line.
-    """
-    def __init__(self, wordChars = printables):
-        super(WordEnd,self).__init__()
-        self.wordChars = set(wordChars)
-        self.skipWhitespace = False
-        self.errmsg = "Not at the end of a word"
-
-    def parseImpl(self, instring, loc, doActions=True ):
-        instrlen = len(instring)
-        if instrlen>0 and loc<instrlen:
-            if (instring[loc] in self.wordChars or
-                instring[loc-1] not in self.wordChars):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-
-class Or(ParseExpression):
-    """
-    Requires that at least one C{ParseExpression} is found.
-    If two expressions match, the expression that matches the longest string will be used.
-    May be constructed using the C{'^'} operator.
-
-    Example::
-        # construct Or using '^' operator
-
-        number = Regex(r'\d+(\.\d*)?') ^ Regex(r'\d+')
-        print(number.searchString("123 3.1416 789"))
-    prints::
-        [['123'], ['3.1416'], ['789']]
-    """
-    def __init__( self, exprs, savelist = False ):
-        super(Or,self).__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        maxExcLoc = -1
-        maxException = None
-        matches = []
-        for e in self.exprs:
-            try:
-                loc2 = e.tryParse( instring, loc )
-            except ParseException as err:
-                err.__traceback__ = None
-                if err.loc > maxExcLoc:
-                    maxException = err
-                    maxExcLoc = err.loc
-            except IndexError:
-                if len(instring) > maxExcLoc:
-                    maxException = ParseException(instring,len(instring),e.errmsg,self)
-                    maxExcLoc = len(instring)
-            else:
-                # save match among all matches, to retry longest to shortest
-                matches.append((loc2, e))
-
-        if matches:
-            matches.sort(key=lambda x: -x[0])
-            for _,e in matches:
-                try:
-                    return e._parse( instring, loc, doActions )
-                except ParseException as err:
-                    err.__traceback__ = None
-                    if err.loc > maxExcLoc:
-                        maxException = err
-                        maxExcLoc = err.loc
-
-        if maxException is not None:
-            maxException.msg = self.errmsg
-            raise maxException
-        else:
-            raise ParseException(instring, loc, "no defined alternatives to match", self)
-
-
-    def __ixor__(self, other ):
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        return self.append( other ) #Or( [ self, other ] )
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
-
-        return self.strRepr
-
-    def checkRecursion( self, parseElementList ):
-        subRecCheckList = parseElementList[:] + [ self ]
-        for e in self.exprs:
-            e.checkRecursion( subRecCheckList )
-
-
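
A minimal sketch of Or's longest-match behavior, assuming the pyparsing 2.x API above; number is an illustrative name:

    from pyparsing import Word, nums, Combine

    # '^' (Or) evaluates all alternatives and keeps the longest match,
    # so alternative order does not matter here
    number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
    print(number.searchString("123 3.1416 789"))  # -> [['123'], ['3.1416'], ['789']]

Contrast this with MatchFirst below, where the first alternative that matches wins regardless of length.
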
-class MatchFirst(ParseExpression):
-    """
-    Requires that at least one C{ParseExpression} is found.
-    If two expressions match, the first one listed is the one that will match.
-    May be constructed using the C{'|'} operator.
-
-    Example::
-        # construct MatchFirst using '|' operator
-        
-        # watch the order of expressions to match
-        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
-        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
-        # put more selective expression first
-        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
-        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
-    """
-    def __init__( self, exprs, savelist = False ):
-        super(MatchFirst,self).__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        maxExcLoc = -1
-        maxException = None
-        for e in self.exprs:
-            try:
-                ret = e._parse( instring, loc, doActions )
-                return ret
-            except ParseException as err:
-                if err.loc > maxExcLoc:
-                    maxException = err
-                    maxExcLoc = err.loc
-            except IndexError:
-                if len(instring) > maxExcLoc:
-                    maxException = ParseException(instring,len(instring),e.errmsg,self)
-                    maxExcLoc = len(instring)
-
-        # only got here if no expression matched, raise exception for match that made it the furthest
-        else:
-            if maxException is not None:
-                maxException.msg = self.errmsg
-                raise maxException
-            else:
-                raise ParseException(instring, loc, "no defined alternatives to match", self)
-
-    def __ior__(self, other ):
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        return self.append( other ) #MatchFirst( [ self, other ] )
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
-
-        return self.strRepr
-
-    def checkRecursion( self, parseElementList ):
-        subRecCheckList = parseElementList[:] + [ self ]
-        for e in self.exprs:
-            e.checkRecursion( subRecCheckList )
-
-
-class Each(ParseExpression):
-    """
-    Requires all given C{ParseExpression}s to be found, but in any order.
-    Expressions may be separated by whitespace.
-    May be constructed using the C{'&'} operator.
-
-    Example::
-        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
-        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
-        integer = Word(nums)
-        shape_attr = "shape:" + shape_type("shape")
-        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
-        color_attr = "color:" + color("color")
-        size_attr = "size:" + integer("size")
-
-        # use Each (using operator '&') to accept attributes in any order 
-        # (shape and posn are required, color and size are optional)
-        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
-
-        shape_spec.runTests('''
-            shape: SQUARE color: BLACK posn: 100, 120
-            shape: CIRCLE size: 50 color: BLUE posn: 50,80
-            color:GREEN size:20 shape:TRIANGLE posn:20,40
-            '''
-            )
-    prints::
-        shape: SQUARE color: BLACK posn: 100, 120
-        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
-        - color: BLACK
-        - posn: ['100', ',', '120']
-          - x: 100
-          - y: 120
-        - shape: SQUARE
-
-
-        shape: CIRCLE size: 50 color: BLUE posn: 50,80
-        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
-        - color: BLUE
-        - posn: ['50', ',', '80']
-          - x: 50
-          - y: 80
-        - shape: CIRCLE
-        - size: 50
-
-
-        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
-        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
-        - color: GREEN
-        - posn: ['20', ',', '40']
-          - x: 20
-          - y: 40
-        - shape: TRIANGLE
-        - size: 20
-    """
-    def __init__( self, exprs, savelist = True ):
-        super(Each,self).__init__(exprs, savelist)
-        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
-        self.skipWhitespace = True
-        self.initExprGroups = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.initExprGroups:
-            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
-            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
-            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
-            self.optionals = opt1 + opt2
-            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
-            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
-            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
-            self.required += self.multirequired
-            self.initExprGroups = False
-        tmpLoc = loc
-        tmpReqd = self.required[:]
-        tmpOpt  = self.optionals[:]
-        matchOrder = []
-
-        keepMatching = True
-        while keepMatching:
-            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
-            failed = []
-            for e in tmpExprs:
-                try:
-                    tmpLoc = e.tryParse( instring, tmpLoc )
-                except ParseException:
-                    failed.append(e)
-                else:
-                    matchOrder.append(self.opt1map.get(id(e),e))
-                    if e in tmpReqd:
-                        tmpReqd.remove(e)
-                    elif e in tmpOpt:
-                        tmpOpt.remove(e)
-            if len(failed) == len(tmpExprs):
-                keepMatching = False
-
-        if tmpReqd:
-            missing = ", ".join(_ustr(e) for e in tmpReqd)
-            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
-
-        # add any unmatched Optionals, in case they have default values defined
-        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
-
-        resultlist = []
-        for e in matchOrder:
-            loc,results = e._parse(instring,loc,doActions)
-            resultlist.append(results)
-
-        finalResults = sum(resultlist, ParseResults([]))
-        return loc, finalResults
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
-
-        return self.strRepr
-
-    def checkRecursion( self, parseElementList ):
-        subRecCheckList = parseElementList[:] + [ self ]
-        for e in self.exprs:
-            e.checkRecursion( subRecCheckList )
-
-
-class ParseElementEnhance(ParserElement):
-    """
-    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
-    """
-    def __init__( self, expr, savelist=False ):
-        super(ParseElementEnhance,self).__init__(savelist)
-        if isinstance( expr, basestring ):
-            if issubclass(ParserElement._literalStringClass, Token):
-                expr = ParserElement._literalStringClass(expr)
-            else:
-                expr = ParserElement._literalStringClass(Literal(expr))
-        self.expr = expr
-        self.strRepr = None
-        if expr is not None:
-            self.mayIndexError = expr.mayIndexError
-            self.mayReturnEmpty = expr.mayReturnEmpty
-            self.setWhitespaceChars( expr.whiteChars )
-            self.skipWhitespace = expr.skipWhitespace
-            self.saveAsList = expr.saveAsList
-            self.callPreparse = expr.callPreparse
-            self.ignoreExprs.extend(expr.ignoreExprs)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.expr is not None:
-            return self.expr._parse( instring, loc, doActions, callPreParse=False )
-        else:
-            raise ParseException("",loc,self.errmsg,self)
-
-    def leaveWhitespace( self ):
-        self.skipWhitespace = False
-        self.expr = self.expr.copy()
-        if self.expr is not None:
-            self.expr.leaveWhitespace()
-        return self
-
-    def ignore( self, other ):
-        if isinstance( other, Suppress ):
-            if other not in self.ignoreExprs:
-                super( ParseElementEnhance, self).ignore( other )
-                if self.expr is not None:
-                    self.expr.ignore( self.ignoreExprs[-1] )
-        else:
-            super( ParseElementEnhance, self).ignore( other )
-            if self.expr is not None:
-                self.expr.ignore( self.ignoreExprs[-1] )
-        return self
-
-    def streamline( self ):
-        super(ParseElementEnhance,self).streamline()
-        if self.expr is not None:
-            self.expr.streamline()
-        return self
-
-    def checkRecursion( self, parseElementList ):
-        if self in parseElementList:
-            raise RecursiveGrammarException( parseElementList+[self] )
-        subRecCheckList = parseElementList[:] + [ self ]
-        if self.expr is not None:
-            self.expr.checkRecursion( subRecCheckList )
-
-    def validate( self, validateTrace=[] ):
-        tmp = validateTrace[:]+[self]
-        if self.expr is not None:
-            self.expr.validate(tmp)
-        self.checkRecursion( [] )
-
-    def __str__( self ):
-        try:
-            return super(ParseElementEnhance,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None and self.expr is not None:
-            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
-        return self.strRepr
-
-
-class FollowedBy(ParseElementEnhance):
-    """
-    Lookahead matching of the given parse expression.  C{FollowedBy}
-    does I{not} advance the parsing position within the input string, it only
-    verifies that the specified parse expression matches at the current
-    position.  C{FollowedBy} always returns a null token list.
-
-    Example::
-        # use FollowedBy to match a label only if it is followed by a ':'
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        
-        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
-    prints::
-        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
-    """
-    def __init__( self, expr ):
-        super(FollowedBy,self).__init__(expr)
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        self.expr.tryParse( instring, loc )
-        return loc, []
-
-
-class NotAny(ParseElementEnhance):
-    """
-    Lookahead to disallow matching with the given parse expression.  C{NotAny}
-    does I{not} advance the parsing position within the input string, it only
-    verifies that the specified parse expression does I{not} match at the current
-    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
-    always returns a null token list.  May be constructed using the '~' operator.
-
-    Example::
-        
-    """
-    def __init__( self, expr ):
-        super(NotAny,self).__init__(expr)
-        #~ self.leaveWhitespace()
-        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
-        self.mayReturnEmpty = True
-        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.expr.canParseNext(instring, loc):
-            raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "~{" + _ustr(self.expr) + "}"
-
-        return self.strRepr
-
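
The NotAny docstring above leaves its Example section empty; a minimal sketch, assuming the pyparsing 2.x API, with ident and the reserved word 'end' as illustrative choices:

    from pyparsing import Word, alphas, Keyword, NotAny

    # match an identifier only if it is not the reserved word 'end';
    # the operator form ~Keyword("end") is equivalent
    ident = NotAny(Keyword("end")) + Word(alphas)
    print(ident.parseString("start"))  # -> ['start']
    # ident.parseString("end") raises ParseException: Found unwanted token, ...
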
-class _MultipleMatch(ParseElementEnhance):
-    def __init__( self, expr, stopOn=None):
-        super(_MultipleMatch, self).__init__(expr)
-        self.saveAsList = True
-        ender = stopOn
-        if isinstance(ender, basestring):
-            ender = ParserElement._literalStringClass(ender)
-        self.not_ender = ~ender if ender is not None else None
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        self_expr_parse = self.expr._parse
-        self_skip_ignorables = self._skipIgnorables
-        check_ender = self.not_ender is not None
-        if check_ender:
-            try_not_ender = self.not_ender.tryParse
-        
-        # must be at least one (but first see if we are the stopOn sentinel;
-        # if so, fail)
-        if check_ender:
-            try_not_ender(instring, loc)
-        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
-        try:
-            hasIgnoreExprs = (not not self.ignoreExprs)
-            while 1:
-                if check_ender:
-                    try_not_ender(instring, loc)
-                if hasIgnoreExprs:
-                    preloc = self_skip_ignorables( instring, loc )
-                else:
-                    preloc = loc
-                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
-                if tmptokens or tmptokens.haskeys():
-                    tokens += tmptokens
-        except (ParseException,IndexError):
-            pass
-
-        return loc, tokens
-        
-class OneOrMore(_MultipleMatch):
-    """
-    Repetition of one or more of the given expression.
-    
-    Parameters:
-     - expr - expression that must match one or more times
-     - stopOn - (default=C{None}) - expression for a terminating sentinel
-          (only required if the sentinel would ordinarily match the repetition 
-          expression)          
-
-    Example::
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: BLACK"
-        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
-        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-        
-        # could also be written as
-        (attr_expr * (1,)).parseString(text).pprint()
-    """
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + _ustr(self.expr) + "}..."
-
-        return self.strRepr
-
-class ZeroOrMore(_MultipleMatch):
-    """
-    Optional repetition of zero or more of the given expression.
-    
-    Parameters:
-     - expr - expression that must match zero or more times
-     - stopOn - (default=C{None}) - expression for a terminating sentinel
-          (only required if the sentinel would ordinarily match the repetition 
-          expression)          
-
-    Example: similar to L{OneOrMore}
-    """
-    def __init__( self, expr, stopOn=None):
-        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
-        self.mayReturnEmpty = True
-        
-    def parseImpl( self, instring, loc, doActions=True ):
-        try:
-            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
-        except (ParseException,IndexError):
-            return loc, []
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "[" + _ustr(self.expr) + "]..."
-
-        return self.strRepr
-
-class _NullToken(object):
-    def __bool__(self):
-        return False
-    __nonzero__ = __bool__
-    def __str__(self):
-        return ""
-
-_optionalNotMatched = _NullToken()
-class Optional(ParseElementEnhance):
-    """
-    Optional matching of the given expression.
-
-    Parameters:
-     - expr - expression that must match zero or more times
-     - default (optional) - value to be returned if the optional expression is not found.
-
-    Example::
-        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
-        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
-        zip.runTests('''
-            # traditional ZIP code
-            12345
-            
-            # ZIP+4 form
-            12101-0001
-            
-            # invalid ZIP
-            98765-
-            ''')
-    prints::
-        # traditional ZIP code
-        12345
-        ['12345']
-
-        # ZIP+4 form
-        12101-0001
-        ['12101-0001']
-
-        # invalid ZIP
-        98765-
-             ^
-        FAIL: Expected end of text (at char 5), (line:1, col:6)
-    """
-    def __init__( self, expr, default=_optionalNotMatched ):
-        super(Optional,self).__init__( expr, savelist=False )
-        self.saveAsList = self.expr.saveAsList
-        self.defaultValue = default
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        try:
-            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
-        except (ParseException,IndexError):
-            if self.defaultValue is not _optionalNotMatched:
-                if self.expr.resultsName:
-                    tokens = ParseResults([ self.defaultValue ])
-                    tokens[self.expr.resultsName] = self.defaultValue
-                else:
-                    tokens = [ self.defaultValue ]
-            else:
-                tokens = []
-        return loc, tokens
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "[" + _ustr(self.expr) + "]"
-
-        return self.strRepr
-
-class SkipTo(ParseElementEnhance):
-    """
-    Token for skipping over all undefined text until the matched expression is found.
-
-    Parameters:
-     - expr - target expression marking the end of the data to be skipped
-     - include - (default=C{False}) if True, the target expression is also parsed 
-          (the skipped text and target expression are returned as a 2-element list).
-     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
-          comments) that might contain false matches to the target expression
-     - failOn - (default=C{None}) define expressions that are not allowed to be 
-          included in the skipped test; if found before the target expression is found, 
-          the SkipTo is not a match
-
-    Example::
-        report = '''
-            Outstanding Issues Report - 1 Jan 2000
-
-               # | Severity | Description                               |  Days Open
-            -----+----------+-------------------------------------------+-----------
-             101 | Critical | Intermittent system crash                 |          6
-              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
-              79 | Minor    | System slow when running too many reports |         47
-            '''
-        integer = Word(nums)
-        SEP = Suppress('|')
-        # use SkipTo to simply match everything up until the next SEP
-        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
-        # - parse action will call token.strip() for each matched token, i.e., the description body
-        string_data = SkipTo(SEP, ignore=quotedString)
-        string_data.setParseAction(tokenMap(str.strip))
-        ticket_expr = (integer("issue_num") + SEP 
-                      + string_data("sev") + SEP 
-                      + string_data("desc") + SEP 
-                      + integer("days_open"))
-        
-        for tkt in ticket_expr.searchString(report):
-            print(tkt.dump())
-    prints::
-        ['101', 'Critical', 'Intermittent system crash', '6']
-        - days_open: 6
-        - desc: Intermittent system crash
-        - issue_num: 101
-        - sev: Critical
-        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
-        - days_open: 14
-        - desc: Spelling error on Login ('log|n')
-        - issue_num: 94
-        - sev: Cosmetic
-        ['79', 'Minor', 'System slow when running too many reports', '47']
-        - days_open: 47
-        - desc: System slow when running too many reports
-        - issue_num: 79
-        - sev: Minor
-    """
-    def __init__( self, other, include=False, ignore=None, failOn=None ):
-        super( SkipTo, self ).__init__( other )
-        self.ignoreExpr = ignore
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.includeMatch = include
-        self.asList = False
-        if isinstance(failOn, basestring):
-            self.failOn = ParserElement._literalStringClass(failOn)
-        else:
-            self.failOn = failOn
-        self.errmsg = "No match found for "+_ustr(self.expr)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        startloc = loc
-        instrlen = len(instring)
-        expr = self.expr
-        expr_parse = self.expr._parse
-        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
-        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-        
-        tmploc = loc
-        while tmploc <= instrlen:
-            if self_failOn_canParseNext is not None:
-                # break if failOn expression matches
-                if self_failOn_canParseNext(instring, tmploc):
-                    break
-                    
-            if self_ignoreExpr_tryParse is not None:
-                # advance past ignore expressions
-                while 1:
-                    try:
-                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
-                    except ParseBaseException:
-                        break
-            
-            try:
-                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
-            except (ParseException, IndexError):
-                # no match, advance loc in string
-                tmploc += 1
-            else:
-                # matched skipto expr, done
-                break
-
-        else:
-            # ran off the end of the input string without matching skipto expr, fail
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        # build up return values
-        loc = tmploc
-        skiptext = instring[startloc:loc]
-        skipresult = ParseResults(skiptext)
-        
-        if self.includeMatch:
-            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
-            skipresult += mat
-
-        return loc, skipresult
-
-class Forward(ParseElementEnhance):
-    """
-    Forward declaration of an expression to be defined later -
-    used for recursive grammars, such as algebraic infix notation.
-    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
-
-    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
-    Specifically, '|' has a lower precedence than '<<', so that::
-        fwdExpr << a | b | c
-    will actually be evaluated as::
-        (fwdExpr << a) | b | c
-    thereby leaving b and c out as parseable alternatives.  It is recommended that you
-    explicitly group the values inserted into the C{Forward}::
-        fwdExpr << (a | b | c)
-    Converting to use the '<<=' operator instead will avoid this problem.
-
-    See L{ParseResults.pprint} for an example of a recursive parser created using
-    C{Forward}.
-    """
-    def __init__( self, other=None ):
-        super(Forward,self).__init__( other, savelist=False )
-
-    def __lshift__( self, other ):
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass(other)
-        self.expr = other
-        self.strRepr = None
-        self.mayIndexError = self.expr.mayIndexError
-        self.mayReturnEmpty = self.expr.mayReturnEmpty
-        self.setWhitespaceChars( self.expr.whiteChars )
-        self.skipWhitespace = self.expr.skipWhitespace
-        self.saveAsList = self.expr.saveAsList
-        self.ignoreExprs.extend(self.expr.ignoreExprs)
-        return self
-        
-    def __ilshift__(self, other):
-        return self << other
-    
-    def leaveWhitespace( self ):
-        self.skipWhitespace = False
-        return self
-
-    def streamline( self ):
-        if not self.streamlined:
-            self.streamlined = True
-            if self.expr is not None:
-                self.expr.streamline()
-        return self
-
-    def validate( self, validateTrace=[] ):
-        if self not in validateTrace:
-            tmp = validateTrace[:]+[self]
-            if self.expr is not None:
-                self.expr.validate(tmp)
-        self.checkRecursion([])
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-        return self.__class__.__name__ + ": ..."
-
-        # stubbed out for now - creates awful memory and perf issues
-        self._revertClass = self.__class__
-        self.__class__ = _ForwardNoRecurse
-        try:
-            if self.expr is not None:
-                retString = _ustr(self.expr)
-            else:
-                retString = "None"
-        finally:
-            self.__class__ = self._revertClass
-        return self.__class__.__name__ + ": " + retString
-
-    def copy(self):
-        if self.expr is not None:
-            return super(Forward,self).copy()
-        else:
-            ret = Forward()
-            ret <<= self
-            return ret
-
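
A minimal sketch of a recursive grammar built on Forward, assuming the pyparsing 2.x API above; expr and atom are illustrative names:

    from pyparsing import Forward, Group, OneOrMore, Suppress, Word, nums

    # nested parenthesized lists of integers, e.g. "(1 (2 3) 4)"
    expr = Forward()
    atom = Word(nums) | Group(Suppress('(') + expr + Suppress(')'))
    expr <<= OneOrMore(atom)
    print(expr.parseString("(1 (2 3) 4)"))  # -> [['1', ['2', '3'], '4']]
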
-class _ForwardNoRecurse(Forward):
-    def __str__( self ):
-        return "..."
-
-class TokenConverter(ParseElementEnhance):
-    """
-    Abstract subclass of C{ParseExpression}, for converting parsed results.
-    """
-    def __init__( self, expr, savelist=False ):
-        super(TokenConverter,self).__init__( expr )#, savelist )
-        self.saveAsList = False
-
-class Combine(TokenConverter):
-    """
-    Converter to concatenate all matching tokens to a single string.
-    By default, the matching patterns must also be contiguous in the input string;
-    this can be disabled by specifying C{'adjacent=False'} in the constructor.
-
-    Example::
-        real = Word(nums) + '.' + Word(nums)
-        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
-        # will also erroneously match the following
-        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
-
-        real = Combine(Word(nums) + '.' + Word(nums))
-        print(real.parseString('3.1416')) # -> ['3.1416']
-        # no match when there are internal spaces
-        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
-    """
-    def __init__( self, expr, joinString="", adjacent=True ):
-        super(Combine,self).__init__( expr )
-        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
-        if adjacent:
-            self.leaveWhitespace()
-        self.adjacent = adjacent
-        self.skipWhitespace = True
-        self.joinString = joinString
-        self.callPreparse = True
-
-    def ignore( self, other ):
-        if self.adjacent:
-            ParserElement.ignore(self, other)
-        else:
-            super( Combine, self).ignore( other )
-        return self
-
-    def postParse( self, instring, loc, tokenlist ):
-        retToks = tokenlist.copy()
-        del retToks[:]
-        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
-
-        if self.resultsName and retToks.haskeys():
-            return [ retToks ]
-        else:
-            return retToks
-
-class Group(TokenConverter):
-    """
-    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
-
-    Example::
-        ident = Word(alphas)
-        num = Word(nums)
-        term = ident | num
-        func = ident + Optional(delimitedList(term))
-        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
-
-        func = ident + Group(Optional(delimitedList(term)))
-        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
-    """
-    def __init__( self, expr ):
-        super(Group,self).__init__( expr )
-        self.saveAsList = True
-
-    def postParse( self, instring, loc, tokenlist ):
-        return [ tokenlist ]
-
-class Dict(TokenConverter):
-    """
-    Converter to return a repetitive expression as a list, but also as a dictionary.
-    Each element can also be referenced using the first token in the expression as its key.
-    Useful for tabular report scraping when the first column can be used as a item key.
-
-    Example::
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        
-        # print attributes as plain groups
-        print(OneOrMore(attr_expr).parseString(text).dump())
-        
-        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
-        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
-        print(result.dump())
-        
-        # access named fields as dict entries, or output as dict
-        print(result['shape'])        
-        print(result.asDict())
-    prints::
-        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-    See more examples at L{ParseResults} of accessing fields by results name.
-    """
-    def __init__( self, expr ):
-        super(Dict,self).__init__( expr )
-        self.saveAsList = True
-
-    def postParse( self, instring, loc, tokenlist ):
-        for i,tok in enumerate(tokenlist):
-            if len(tok) == 0:
-                continue
-            ikey = tok[0]
-            if isinstance(ikey,int):
-                ikey = _ustr(tok[0]).strip()
-            if len(tok)==1:
-                tokenlist[ikey] = _ParseResultsWithOffset("",i)
-            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
-                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
-            else:
-                dictvalue = tok.copy() #ParseResults(i)
-                del dictvalue[0]
-                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
-                else:
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
-
-        if self.resultsName:
-            return [ tokenlist ]
-        else:
-            return tokenlist
-
-
-class Suppress(TokenConverter):
-    """
-    Converter for ignoring the results of a parsed expression.
-
-    Example::
-        source = "a, b, c,d"
-        wd = Word(alphas)
-        wd_list1 = wd + ZeroOrMore(',' + wd)
-        print(wd_list1.parseString(source))
-
-        # often, delimiters that are useful during parsing are just in the
-        # way afterward - use Suppress to keep them out of the parsed output
-        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
-        print(wd_list2.parseString(source))
-    prints::
-        ['a', ',', 'b', ',', 'c', ',', 'd']
-        ['a', 'b', 'c', 'd']
-    (See also L{delimitedList}.)
-    """
-    def postParse( self, instring, loc, tokenlist ):
-        return []
-
-    def suppress( self ):
-        return self
-
-
-class OnlyOnce(object):
-    """
-    Wrapper for parse actions, to ensure they are only called once.
-    """
-    def __init__(self, methodCall):
-        self.callable = _trim_arity(methodCall)
-        self.called = False
-    def __call__(self,s,l,t):
-        if not self.called:
-            results = self.callable(s,l,t)
-            self.called = True
-            return results
-        raise ParseException(s,l,"")
-    def reset(self):
-        self.called = False
-
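
A minimal sketch of OnlyOnce arming and re-arming a parse action, assuming the pyparsing 2.x API above; announce is an illustrative callback:

    from pyparsing import Word, alphas, OnlyOnce, ParseException

    def announce(s, l, t):
        print("matched:", t[0])

    wd = Word(alphas)
    once = OnlyOnce(announce)
    wd.setParseAction(once)
    wd.parseString("hello")      # prints: matched: hello
    try:
        wd.parseString("again")  # the wrapped action has already run once
    except ParseException:
        once.reset()             # re-arm so the action can fire again
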
-def traceParseAction(f):
-    """
-    Decorator for debugging parse actions. 
-    
-    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
-    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
-
-    Example::
-        wd = Word(alphas)
-
-        @traceParseAction
-        def remove_duplicate_chars(tokens):
-            return ''.join(sorted(set(''.join(tokens))))
-
-        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
-        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
-    prints::
-        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s,l,t = paArgs[-3:]
-        if len(paArgs)>3:
-            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
-        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
-        try:
-            ret = f(*paArgs)
-        except Exception as exc:
-            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
-            raise
-        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
-        return ret
-    try:
-        z.__name__ = f.__name__
-    except AttributeError:
-        pass
-    return z
-
-#
-# global helpers
-#
-def delimitedList( expr, delim=",", combine=False ):
-    """
-    Helper to define a delimited list of expressions - the delimiter defaults to ','.
-    By default, the list elements and delimiters can have intervening whitespace, and
-    comments, but this can be overridden by passing C{combine=True} in the constructor.
-    If C{combine} is set to C{True}, the matching tokens are returned as a single token
-    string, with the delimiters included; otherwise, the matching tokens are returned
-    as a list of tokens, with the delimiters suppressed.
-
-    Example::
-        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
-        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
-    """
-    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
-    if combine:
-        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
-    else:
-        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
-
-def countedArray( expr, intExpr=None ):
-    """
-    Helper to define a counted list of expressions.
-    This helper defines a pattern of the form::
-        integer expr expr expr...
-    where the leading integer tells how many expr expressions follow.
-    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
-    
-    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
-
-    Example::
-        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
-
-        # in this parser, the leading integer value is given in binary,
-        # '10' indicating that 2 values are in the array
-        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
-        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
-    """
-    arrayExpr = Forward()
-    def countFieldParseAction(s,l,t):
-        n = t[0]
-        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
-        return []
-    if intExpr is None:
-        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
-    else:
-        intExpr = intExpr.copy()
-    intExpr.setName("arrayLen")
-    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
-    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
-
-def _flatten(L):
-    ret = []
-    for i in L:
-        if isinstance(i,list):
-            ret.extend(_flatten(i))
-        else:
-            ret.append(i)
-    return ret
-
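For reference, C{_flatten} recursively splices nested lists into one flat list, e.g.::

    _flatten([1, [2, [3, 4]], 5])  # -> [1, 2, 3, 4, 5]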
-def matchPreviousLiteral(expr):
-    """
-    Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks
-    for a 'repeat' of a previous expression.  For example::
-        first = Word(nums)
-        second = matchPreviousLiteral(first)
-        matchExpr = first + ":" + second
-    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
-    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
-    If this is not desired, use C{matchPreviousExpr}.
-    Do I{not} use with packrat parsing enabled.
-    """
-    rep = Forward()
-    def copyTokenToRepeater(s,l,t):
-        if t:
-            if len(t) == 1:
-                rep << t[0]
-            else:
-                # flatten t tokens
-                tflat = _flatten(t.asList())
-                rep << And(Literal(tt) for tt in tflat)
-        else:
-            rep << Empty()
-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-    rep.setName('(prev) ' + _ustr(expr))
-    return rep
-
-def matchPreviousExpr(expr):
-    """
-    Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks
-    for a 'repeat' of a previous expression.  For example::
-        first = Word(nums)
-        second = matchPreviousExpr(first)
-        matchExpr = first + ":" + second
-    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
-    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
-    the expressions are evaluated first, and then compared, so
-    C{"1"} is compared with C{"10"}.
-    Do I{not} use with packrat parsing enabled.
-    """
-    rep = Forward()
-    e2 = expr.copy()
-    rep <<= e2
-    def copyTokenToRepeater(s,l,t):
-        matchTokens = _flatten(t.asList())
-        def mustMatchTheseTokens(s,l,t):
-            theseTokens = _flatten(t.asList())
-            if  theseTokens != matchTokens:
-                raise ParseException("",0,"")
-        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-    rep.setName('(prev) ' + _ustr(expr))
-    return rep
-
-def _escapeRegexRangeChars(s):
-    #~  escape these chars: ^-]
-    for c in r"\^-]":
-        s = s.replace(c,_bslash+c)
-    s = s.replace("\n",r"\n")
-    s = s.replace("\t",r"\t")
-    return _ustr(s)
-
-def oneOf( strs, caseless=False, useRegex=True ):
-    """
-    Helper to quickly define a set of alternative Literals, and makes sure to do
-    longest-first testing when there is a conflict, regardless of the input order,
-    but returns a C{L{MatchFirst}} for best performance.
-
-    Parameters:
-     - strs - a string of space-delimited literals, or a collection of string literals
-     - caseless - (default=C{False}) - treat all literals as caseless
-     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
-          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
-          if creating a C{Regex} raises an exception)
-
-    Example::
-        comp_oper = oneOf("< = > <= >= !=")
-        var = Word(alphas)
-        number = Word(nums)
-        term = var | number
-        comparison_expr = term + comp_oper + term
-        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
-    prints::
-        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
-    """
-    if caseless:
-        isequal = ( lambda a,b: a.upper() == b.upper() )
-        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
-        parseElementClass = CaselessLiteral
-    else:
-        isequal = ( lambda a,b: a == b )
-        masks = ( lambda a,b: b.startswith(a) )
-        parseElementClass = Literal
-
-    symbols = []
-    if isinstance(strs,basestring):
-        symbols = strs.split()
-    elif isinstance(strs, Iterable):
-        symbols = list(strs)
-    else:
-        warnings.warn("Invalid argument to oneOf, expected string or iterable",
-                SyntaxWarning, stacklevel=2)
-    if not symbols:
-        return NoMatch()
-
-    i = 0
-    while i < len(symbols)-1:
-        cur = symbols[i]
-        for j,other in enumerate(symbols[i+1:]):
-            if ( isequal(other, cur) ):
-                del symbols[i+j+1]
-                break
-            elif ( masks(cur, other) ):
-                del symbols[i+j+1]
-                symbols.insert(i,other)
-                cur = other
-                break
-        else:
-            i += 1
-
-    if not caseless and useRegex:
-        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
-        try:
-            if len(symbols)==len("".join(symbols)):
-                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
-            else:
-                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
-        except Exception:
-            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
-                    SyntaxWarning, stacklevel=2)
-
-
-    # last resort, just use MatchFirst
-    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
-
-def dictOf( key, value ):
-    """
-    Helper to easily and clearly define a dictionary by specifying the respective patterns
-    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
-    in the proper order.  The key pattern can include delimiting markers or punctuation,
-    as long as they are suppressed, thereby leaving the significant key text.  The value
-    pattern can include named results, so that the C{Dict} results can include named token
-    fields.
-
-    Example::
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        print(OneOrMore(attr_expr).parseString(text).dump())
-        
-        attr_label = label
-        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
-
-        # similar to Dict, but simpler call format
-        result = dictOf(attr_label, attr_value).parseString(text)
-        print(result.dump())
-        print(result['shape'])
-        print(result.shape)  # object attribute access works too
-        print(result.asDict())
-    prints::
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        SQUARE
-        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
-    """
-    return Dict( ZeroOrMore( Group ( key + value ) ) )
-
-def originalTextFor(expr, asString=True):
-    """
-    Helper to return the original, untokenized text for a given expression.  Useful to
-    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
-    revert separate tokens with intervening whitespace back to the original matching
-    input text. By default, returns a string containing the original parsed text.
-       
-    If the optional C{asString} argument is passed as C{False}, then the return value is a 
-    C{L{ParseResults}} containing any results names that were originally matched, and a 
-    single token containing the original matched text from the input string.  So if 
-    the expression passed to C{L{originalTextFor}} contains expressions with defined
-    results names, you must set C{asString} to C{False} if you want to preserve those
-    results name values.
-
-    Example::
-        src = "this is test  bold text  normal text "
-        for tag in ("b","i"):
-            opener,closer = makeHTMLTags(tag)
-            patt = originalTextFor(opener + SkipTo(closer) + closer)
-            print(patt.searchString(src)[0])
-    prints::
-        ['<b> bold <i>text</i> </b>']
-        ['<i>text</i>']
-    """
-    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
-    endlocMarker = locMarker.copy()
-    endlocMarker.callPreparse = False
-    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
-    if asString:
-        extractText = lambda s,l,t: s[t._original_start:t._original_end]
-    else:
-        def extractText(s,l,t):
-            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
-    matchExpr.setParseAction(extractText)
-    matchExpr.ignoreExprs = expr.ignoreExprs
-    return matchExpr
-
-def ungroup(expr): 
-    """
-    Helper to undo pyparsing's default grouping of And expressions, even
-    if all but one are non-empty.
-    """
-    return TokenConverter(expr).setParseAction(lambda t:t[0])
-
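A short sketch of what C{ungroup} undoes (illustrative only)::

    grouped = Group(Word(alphas) + Word(nums))
    print(grouped.parseString("abc 123"))           # -> [['abc', '123']]
    print(ungroup(grouped).parseString("abc 123"))  # -> ['abc', '123']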
-def locatedExpr(expr):
-    """
-    Helper to decorate a returned token with its starting and ending locations in the input string.
-    This helper adds the following results names:
-     - locn_start = location where matched expression begins
-     - locn_end = location where matched expression ends
-     - value = the actual parsed results
-
-    Be careful if the input text contains C{<TAB>} characters, you may want to call
-    C{L{ParserElement.parseWithTabs}}
-
-    Example::
-        wd = Word(alphas)
-        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
-            print(match)
-    prints::
-        [[0, 'ljsdf', 5]]
-        [[8, 'lksdjjf', 15]]
-        [[18, 'lkkjj', 23]]
-    """
-    locator = Empty().setParseAction(lambda s,l,t: l)
-    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
-
-
-# convenience constants for positional expressions
-empty       = Empty().setName("empty")
-lineStart   = LineStart().setName("lineStart")
-lineEnd     = LineEnd().setName("lineEnd")
-stringStart = StringStart().setName("stringStart")
-stringEnd   = StringEnd().setName("stringEnd")
-
-_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
-_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
-
-def srange(s):
-    r"""
-    Helper to easily define string ranges for use in Word construction.  Borrows
-    syntax from regexp '[]' string range definitions::
-        srange("[0-9]")   -> "0123456789"
-        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
-        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-    The input string must be enclosed in []'s, and the returned string is the expanded
-    character set joined into a single string.
-    The values enclosed in the []'s may be:
-     - a single character
-     - an escaped character with a leading backslash (such as C{\-} or C{\]})
-     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
-         (C{\0x##} is also supported for backwards compatibility) 
-     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
-     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
-     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
-    """
-    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
-    try:
-        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
-    except Exception:
-        return ""
-
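In practice the expanded string is handed to C{Word} to build custom character sets (illustrative only)::

    identifier = Word(srange("[a-zA-Z_]"), srange("[a-zA-Z0-9_]"))
    print(identifier.parseString("_myVar1"))  # -> ['_myVar1']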
-def matchOnlyAtCol(n):
-    """
-    Helper method for defining parse actions that require matching at a specific
-    column in the input text.
-    """
-    def verifyCol(strg,locn,toks):
-        if col(locn,strg) != n:
-            raise ParseException(strg,locn,"matched token not at column %d" % n)
-    return verifyCol
-
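A minimal sketch (illustrative only; columns are 1-based)::

    wd = Word(alphas).setParseAction(matchOnlyAtCol(1))
    print(wd.parseString("abc"))  # -> ['abc'] - token starts at column 1
    # wd.parseString("   abc") would match at column 4 after whitespace
    # skipping, so matchOnlyAtCol(1) raises ParseException there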
-def replaceWith(replStr):
-    """
-    Helper method for common parse actions that simply return a literal value.  Especially
-    useful when used with C{L{transformString}()}.
-
-    Example::
-        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
-        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
-        term = na | num
-        
-        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s,l,t: [replStr]
-
-def removeQuotes(s,l,t):
-    """
-    Helper parse action for removing quotation marks from parsed quoted strings.
-
-    Example::
-        # by default, quotation marks are included in parsed results
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use removeQuotes to strip quotation marks from parsed results
-        quotedString.setParseAction(removeQuotes)
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-def tokenMap(func, *args):
-    """
-    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
-    args are passed, they are forwarded to the given function as additional arguments after
-    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
-    parsed data to an integer using base 16.
-
-    Example (compare the last example to the one in L{ParserElement.transformString})::
-        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
-        hex_ints.runTests('''
-            00 11 22 aa FF 0a 0d 1a
-            ''')
-        
-        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
-        OneOrMore(upperword).runTests('''
-            my kingdom for a horse
-            ''')
-
-        wd = Word(alphas).setParseAction(tokenMap(str.title))
-        OneOrMore(wd).setParseAction(' '.join).runTests('''
-            now is the winter of our discontent made glorious summer by this sun of york
-            ''')
-    prints::
-        00 11 22 aa FF 0a 0d 1a
-        [0, 17, 34, 170, 255, 10, 13, 26]
-
-        my kingdom for a horse
-        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
-        now is the winter of our discontent made glorious summer by this sun of york
-        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
-    """
-    def pa(s,l,t):
-        return [func(tokn, *args) for tokn in t]
-
-    try:
-        func_name = getattr(func, '__name__', 
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    pa.__name__ = func_name
-
-    return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
-    
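New code is pointed at the C{pyparsing_common} equivalents defined later in this module, e.g. (illustrative only)::

    print(Word(alphas).setParseAction(pyparsing_common.upcaseTokens).parseString("kilo"))  # -> ['KILO']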
-def _makeTags(tagStr, xml):
-    """Internal helper to construct opening and closing tag expressions, given a tag name"""
-    if isinstance(tagStr,basestring):
-        resname = tagStr
-        tagStr = Keyword(tagStr, caseless=not xml)
-    else:
-        resname = tagStr.name
-
-    tagAttrName = Word(alphas,alphanums+"_-:")
-    if (xml):
-        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
-        openTag = Suppress("<") + tagStr("tag") + \
-                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
-    else:
-        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
-        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
-        openTag = Suppress("<") + tagStr("tag") + \
-                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
-                Optional( Suppress("=") + tagAttrValue ) ))) + \
-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
-    closeTag = Combine(_L("")
-
-    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
-    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
-    openTag.tag = resname
-    closeTag.tag = resname
-    return openTag, closeTag
-
-def makeHTMLTags(tagStr):
-    """
-    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
-    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
-
-    Example::
-        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
-        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
-        a,a_end = makeHTMLTags("A")
-        link_expr = a + SkipTo(a_end)("link_text") + a_end
-        
-        for link in link_expr.searchString(text):
-            # attributes in the <a> tag (like "href" shown here) are also accessible as named results
-            print(link.link_text, '->', link.href)
-    prints::
-        pyparsing -> http://pyparsing.wikispaces.com
-    """
-    return _makeTags( tagStr, False )
-
-def makeXMLTags(tagStr):
-    """
-    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
-    tags only in the given upper/lower case.
-
-    Example: similar to L{makeHTMLTags}
-    """
-    return _makeTags( tagStr, True )
-
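A minimal sketch along the lines of the L{makeHTMLTags} example, but case-sensitive as XML requires (the tag and attribute below are hypothetical)::

    body, body_end = makeXMLTags("body")
    text = '<body lang="en">hello</body>'
    for match in (body + SkipTo(body_end)("content") + body_end).searchString(text):
        print(match.lang, '->', match.content)  # -> en -> hello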
-def withAttribute(*args,**attrDict):
-    """
-    Helper to create a validating parse action to be used with start tags created
-    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
-    with a required attribute value, to avoid false matches on common tags such as
-    C{<TD>} or C{<DIV>}.
-
-    Call C{withAttribute} with a series of attribute names and values. Specify the list
-    of filter attributes names and values as:
-     - keyword arguments, as in C{(align="right")}, or
-     - as an explicit dict with C{**} operator, when an attribute name is also a Python
-          reserved word, as in C{**{"class":"Customer", "align":"right"}}
-     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
-    For attribute names with a namespace prefix, you must use the second form.  Attribute
-    names are matched insensitive to upper/lower case.
-
-    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
-
-    To verify that the attribute exists, but without specifying a value, pass
-    C{withAttribute.ANY_VALUE} as the value.
-
-    Example::
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
-
-        '''
-        div,div_end = makeHTMLTags("div")
-
-        # only match div tag having a type attribute with value "grid"
-        div_grid = div().setParseAction(withAttribute(type="grid"))
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.searchString(html):
-            print(grid_header.body)
-
-        # construct a match with any div tag having a type attribute, regardless of the value
-        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.searchString(html):
-            print(div_header.body)
-    prints::
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    if args:
-        attrs = args[:]
-    else:
-        attrs = attrDict.items()
-    attrs = [(k,v) for k,v in attrs]
-    def pa(s,l,tokens):
-        for attrName,attrValue in attrs:
-            if attrName not in tokens:
-                raise ParseException(s,l,"no matching attribute " + attrName)
-            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
-                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
-                                            (attrName, tokens[attrName], attrValue))
-    return pa
-withAttribute.ANY_VALUE = object()
-
-def withClass(classname, namespace=''):
-    """
-    Simplified version of C{L{withAttribute}} when matching on a div class - made
-    difficult because C{class} is a reserved word in Python.
-
-    Example::
-        html = '''
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
-
-        '''
-        div,div_end = makeHTMLTags("div")
-        div_grid = div().setParseAction(withClass("grid"))
-
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.searchString(html):
-            print(grid_header.body)
-
-        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.searchString(html):
-            print(div_header.body)
-    prints::
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    classattr = "%s:class" % namespace if namespace else "class"
-    return withAttribute(**{classattr : classname})
-
-opAssoc = _Constants()
-opAssoc.LEFT = object()
-opAssoc.RIGHT = object()
-
-def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
-    """
-    Helper method for constructing grammars of expressions made up of
-    operators working in a precedence hierarchy.  Operators may be unary or
-    binary, left- or right-associative.  Parse actions can also be attached
-    to operator expressions. The generated parser will also recognize the use
-    of parentheses to override operator precedences (see example below).
-
-    Note: if you define a deep operator list, you may see performance issues
-    when using infixNotation. See L{ParserElement.enablePackrat} for a
-    mechanism to potentially improve your parser performance.
-
-    Parameters:
-     - baseExpr - expression representing the most basic element for the nested
-     - opList - list of tuples, one for each operator precedence level in the
-      expression grammar; each tuple is of the form
-      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
-       - opExpr is the pyparsing expression for the operator;
-          may also be a string, which will be converted to a Literal;
-          if numTerms is 3, opExpr is a tuple of two expressions, for the
-          two operators separating the 3 terms
-       - numTerms is the number of terms for this operator (must
-          be 1, 2, or 3)
-       - rightLeftAssoc is the indicator whether the operator is
-          right or left associative, using the pyparsing-defined
-          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
-       - parseAction is the parse action to be associated with
-          expressions matching this operator expression (the
-          parse action tuple member may be omitted); if the parse action
-          is passed a tuple or list of functions, this is equivalent to
-          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
-     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
-     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
-
-    Example::
-        # simple example of four-function arithmetic with ints and variable names
-        integer = pyparsing_common.signed_integer
-        varname = pyparsing_common.identifier
-
-        arith_expr = infixNotation(integer | varname,
-            [
-            ('-', 1, opAssoc.RIGHT),
-            (oneOf('* /'), 2, opAssoc.LEFT),
-            (oneOf('+ -'), 2, opAssoc.LEFT),
-            ])
-
-        arith_expr.runTests('''
-            5+3*6
-            (5+3)*6
-            -2--11
-            ''', fullDump=False)
-    prints::
-        5+3*6
-        [[5, '+', [3, '*', 6]]]
-
-        (5+3)*6
-        [[[5, '+', 3], '*', 6]]
-
-        -2--11
-        [[['-', 2], '-', ['-', 11]]]
-    """
-    ret = Forward()
-    lastExpr = baseExpr | ( lpar + ret + rpar )
-    for i,operDef in enumerate(opList):
-        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
-        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
-        if arity == 3:
-            if opExpr is None or len(opExpr) != 2:
-                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
-            opExpr1, opExpr2 = opExpr
-        thisExpr = Forward().setName(termName)
-        if rightLeftAssoc == opAssoc.LEFT:
-            if arity == 1:
-                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
-            elif arity == 2:
-                if opExpr is not None:
-                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
-                else:
-                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
-            elif arity == 3:
-                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
-                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
-            else:
-                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
-        elif rightLeftAssoc == opAssoc.RIGHT:
-            if arity == 1:
-                # try to avoid LR with this extra test
-                if not isinstance(opExpr, Optional):
-                    opExpr = Optional(opExpr)
-                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
-            elif arity == 2:
-                if opExpr is not None:
-                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
-                else:
-                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
-            elif arity == 3:
-                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
-                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
-            else:
-                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
-        else:
-            raise ValueError("operator must indicate right or left associativity")
-        if pa:
-            if isinstance(pa, (tuple, list)):
-                matchExpr.setParseAction(*pa)
-            else:
-                matchExpr.setParseAction(pa)
-        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
-        lastExpr = thisExpr
-    ret <<= lastExpr
-    return ret
-
-operatorPrecedence = infixNotation
-"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
-
-dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
-sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
-quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
-                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
-unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
-
-def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
-    """
-    Helper method for defining nested lists enclosed in opening and closing
-    delimiters ("(" and ")" are the default).
-
-    Parameters:
-     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
-     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
-     - content - expression for items within the nested lists (default=C{None})
-     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
-
-    If an expression is not provided for the content argument, the nested
-    expression will capture all whitespace-delimited content between delimiters
-    as a list of separate values.
-
-    Use the C{ignoreExpr} argument to define expressions that may contain
-    opening or closing characters that should not be treated as opening
-    or closing characters for nesting, such as quotedString or a comment
-    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
-    The default is L{quotedString}, but if no expressions are to be ignored,
-    then pass C{None} for this argument.
-
-    Example::
-        data_type = oneOf("void int short long char float double")
-        decl_data_type = Combine(data_type + Optional(Word('*')))
-        ident = Word(alphas+'_', alphanums+'_')
-        number = pyparsing_common.number
-        arg = Group(decl_data_type + ident)
-        LPAR,RPAR = map(Suppress, "()")
-
-        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
-
-        c_function = (decl_data_type("type")
-                      + ident("name")
-                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
-                      + code_body("body"))
-        c_function.ignore(cStyleComment)
-
-        source_code = '''
-            int is_odd(int x) {
-                return (x%2);
-            }
-
-            int dec_to_hex(char hchar) {
-                if (hchar >= '0' && hchar <= '9') {
-                    return (ord(hchar)-ord('0'));
-                } else {
-                    return (10+ord(hchar)-ord('A'));
-                }
-            }
-        '''
-        for func in c_function.searchString(source_code):
-            print("%(name)s (%(type)s) args: %(args)s" % func)
-
-    prints::
-        is_odd (int) args: [['int', 'x']]
-        dec_to_hex (int) args: [['char', 'hchar']]
-    """
-    if opener == closer:
-        raise ValueError("opening and closing strings cannot be the same")
-    if content is None:
-        if isinstance(opener,basestring) and isinstance(closer,basestring):
-            if len(opener) == 1 and len(closer)==1:
-                if ignoreExpr is not None:
-                    content = (Combine(OneOrMore(~ignoreExpr +
-                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-                else:
-                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
-                                ).setParseAction(lambda t:t[0].strip()))
-            else:
-                if ignoreExpr is not None:
-                    content = (Combine(OneOrMore(~ignoreExpr +
-                                    ~Literal(opener) + ~Literal(closer) +
-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-                else:
-                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-    else:
-        raise ValueError("opening and closing arguments must be strings if no content expression is given")
-    ret = Forward()
-    if ignoreExpr is not None:
-        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
-    else:
-        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
-    ret.setName('nested %s%s expression' % (opener,closer))
-    return ret
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True):
-    """
-    Helper method for defining space-delimited indentation blocks, such as
-    those used to define block statements in Python source code.
-
-    Parameters:
-     - blockStatementExpr - expression defining syntax of statement that
-            is repeated within the indented block
-     - indentStack - list created by caller to manage indentation stack
-            (multiple statementWithIndentedBlock expressions within a single grammar
-            should share a common indentStack)
-     - indent - boolean indicating whether block must be indented beyond the
-            the current level; set to False for block of left-most
-            statements (default=C{True})
-
-    A valid block must contain at least one C{blockStatement}.
-
-    Example::
-        data = '''
-        def A(z):
-          A1
-          B = 100
-          G = A2
-          A2
-          A3
-        B
-        def BB(a,b,c):
-          BB1
-          def BBA():
-            bba1
-            bba2
-            bba3
-        C
-        D
-        def spam(x,y):
-             def eggs(z):
-                 pass
-        '''
-
-
-        indentStack = [1]
-        stmt = Forward()
-
-        identifier = Word(alphas, alphanums)
-        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
-        func_body = indentedBlock(stmt, indentStack)
-        funcDef = Group( funcDecl + func_body )
-
-        rvalue = Forward()
-        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
-        rvalue << (funcCall | identifier | Word(nums))
-        assignment = Group(identifier + "=" + rvalue)
-        stmt << ( funcDef | assignment | identifier )
-
-        module_body = OneOrMore(stmt)
-
-        parseTree = module_body.parseString(data)
-        parseTree.pprint()
-    prints::
-        [['def',
-          'A',
-          ['(', 'z', ')'],
-          ':',
-          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
-         'B',
-         ['def',
-          'BB',
-          ['(', 'a', 'b', 'c', ')'],
-          ':',
-          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
-         'C',
-         'D',
-         ['def',
-          'spam',
-          ['(', 'x', 'y', ')'],
-          ':',
-          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
-    """
-    def checkPeerIndent(s,l,t):
-        if l >= len(s): return
-        curCol = col(l,s)
-        if curCol != indentStack[-1]:
-            if curCol > indentStack[-1]:
-                raise ParseFatalException(s,l,"illegal nesting")
-            raise ParseException(s,l,"not a peer entry")
-
-    def checkSubIndent(s,l,t):
-        curCol = col(l,s)
-        if curCol > indentStack[-1]:
-            indentStack.append( curCol )
-        else:
-            raise ParseException(s,l,"not a subentry")
-
-    def checkUnindent(s,l,t):
-        if l >= len(s): return
-        curCol = col(l,s)
-        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
-            raise ParseException(s,l,"not an unindent")
-        indentStack.pop()
-
-    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
-    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
-    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
-    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
-    if indent:
-        smExpr = Group( Optional(NL) +
-            #~ FollowedBy(blockStatementExpr) +
-            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
-    else:
-        smExpr = Group( Optional(NL) +
-            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
-    blockStatementExpr.ignore(_bslash + LineEnd())
-    return smExpr.setName('indented block')
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) -commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") -def replaceHTMLEntity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form C{/* ... */}" - -htmlComment = Regex(r"").setName("HTML comment") -"Comment of the form C{}" - -restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") -dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form C{// ... (to end of line)}" - -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") -"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" - -javaStyleComment = cppStyleComment -"Same as C{L{cppStyleComment}}" - -pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form C{# ... (to end of line)}" - -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + - Optional( Word(" \t") + - ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") -commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. 
-   This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
-    """
-    Here are some common low-level expressions that may be useful in jump-starting parser development:
-     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
-     - common L{programming identifiers<identifier>}
-     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
-     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
-     - L{UUID<uuid>}
-     - L{comma-separated list<comma_separated_list>}
-    Parse actions:
-     - C{L{convertToInteger}}
-     - C{L{convertToFloat}}
-     - C{L{convertToDate}}
-     - C{L{convertToDatetime}}
-     - C{L{stripHTMLTags}}
-     - C{L{upcaseTokens}}
-     - C{L{downcaseTokens}}
-
-    Example::
-        pyparsing_common.number.runTests('''
-            # any int or real number, returned as the appropriate type
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.fnumber.runTests('''
-            # any int or real number, returned as float
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.hex_integer.runTests('''
-            # hex numbers
-            100
-            FF
-            ''')
-
-        pyparsing_common.fraction.runTests('''
-            # fractions
-            1/2
-            -3/4
-            ''')
-
-        pyparsing_common.mixed_integer.runTests('''
-            # mixed fractions
-            1
-            1/2
-            -3/4
-            1-3/4
-            ''')
-
-        import uuid
-        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
-        pyparsing_common.uuid.runTests('''
-            # uuid
-            12345678-1234-5678-1234-567812345678
-            ''')
-    prints::
-        # any int or real number, returned as the appropriate type
-        100
-        [100]
-
-        -100
-        [-100]
-
-        +100
-        [100]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # any int or real number, returned as float
-        100
-        [100.0]
-
-        -100
-        [-100.0]
-
-        +100
-        [100.0]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # hex numbers
-        100
-        [256]
-
-        FF
-        [255]
-
-        # fractions
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        # mixed fractions
-        1
-        [1]
-
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        1-3/4
-        [1.75]
-
-        # uuid
-        12345678-1234-5678-1234-567812345678
-        [UUID('12345678-1234-5678-1234-567812345678')]
-    """
-
-    convertToInteger = tokenMap(int)
-    """
-    Parse action for converting parsed integers to Python int
-    """
-
-    convertToFloat = tokenMap(float)
-    """
-    Parse action for converting parsed numbers to Python float
-    """
-
-    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
-    """expression that parses an unsigned integer, returns an int"""
-
-    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
-    """expression that parses a hexadecimal integer, returns an int"""
-
-    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
-    """expression that parses an integer with optional leading sign, returns an int"""
-
-    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
-    """fractional expression of an integer divided by an integer, returns a float"""
-    fraction.addParseAction(lambda t: t[0]/t[-1])
-
-    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
-    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
-    mixed_integer.addParseAction(sum)
-
-    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
-    """expression that parses a floating point number and returns a float"""
float""" - - sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas+'_', alphanums+'_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (C{0.0.0.0 - 255.255.255.255})" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) - - Example:: - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - prints:: - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """ - Helper to create a parse action for converting parsed datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) - - Example:: - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - prints:: - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (C{yyyy-mm-dd})" - - iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """ - Parse action to remove HTML tags from web page HTML source - - Example:: - # strip HTML links from normal text - text = 'More info at the
-            td,td_end = makeHTMLTags("TD")
-            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
-
-            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
-        """
-        return pyparsing_common._html_stripper.transformString(tokens[0])
-
-    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
-                                      + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
-    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
-    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
-    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
-    """Parse action to convert tokens to upper case."""
-
-    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
-    """Parse action to convert tokens to lower case."""
-
-
-if __name__ == "__main__":
-
-    selectToken    = CaselessLiteral("select")
-    fromToken      = CaselessLiteral("from")
-
-    ident          = Word(alphas, alphanums + "_$")
-
-    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
-    columnNameList = Group(delimitedList(columnName)).setName("columns")
-    columnSpec     = ('*' | columnNameList)
-
-    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
-    tableNameList  = Group(delimitedList(tableName)).setName("tables")
-
-    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
-
-    # demo runTests method, including embedded comments in test string
-    simpleSQL.runTests("""
-        # '*' as column list and dotted table name
-        select * from SYS.XYZZY
-
-        # caseless match on "SELECT", and casts back to "select"
-        SELECT * from XYZZY, ABC
-
-        # list of column names, and mixed case SELECT keyword
-        Select AA,BB,CC from Sys.dual
-
-        # multiple tables
-        Select A, B, C from Sys.dual, Table2
-
-        # invalid SELECT keyword - should fail
-        Xelect A, B, C from Sys.dual
-
-        # incomplete command - should fail
-        Select
-
-        # invalid column name - should fail
-        Select ^^^ frox Sys.dual
-
-        """)
-
-    pyparsing_common.number.runTests("""
-        100
-        -100
-        +100
-        3.14159
-        6.02e23
-        1e-12
-        """)
-
-    # any int or real number, returned as float
-    pyparsing_common.fnumber.runTests("""
-        100
-        -100
-        +100
-        3.14159
-        6.02e23
-        1e-12
-        """)
-
-    pyparsing_common.hex_integer.runTests("""
-        100
-        FF
-        """)
-
-    import uuid
-    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
-    pyparsing_common.uuid.runTests("""
-        12345678-1234-5678-1234-567812345678
-        """)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/extern/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/extern/__init__.py
deleted file mode 100755
index 70897ee..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/extern/__init__.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import importlib.util
-import sys
-
-
-class VendorImporter:
-    """
-    A PEP 302 meta path importer for finding optionally-vendored
-    or otherwise naturally-installed packages from root_name.
-    """
-
-    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
-        self.root_name = root_name
-        self.vendored_names = set(vendored_names)
-        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
-
-    @property
-    def search_path(self):
-        """
-        Search first the vendor package then as a natural package.
- """ - yield self.vendor_pkg + '.' - yield '' - - def _module_matches_namespace(self, fullname): - """Figure out if the target module is vendored.""" - root, base, target = fullname.partition(self.root_name + '.') - return not root and any(map(target.startswith, self.vendored_names)) - - def load_module(self, fullname): - """ - Iterate over the search path to locate and load fullname. - """ - root, base, target = fullname.partition(self.root_name + '.') - for prefix in self.search_path: - try: - extant = prefix + target - __import__(extant) - mod = sys.modules[extant] - sys.modules[fullname] = mod - return mod - except ImportError: - pass - else: - raise ImportError( - "The '{target}' package is required; " - "normally this is bundled with this package so if you get " - "this warning, consult the packager of your " - "distribution.".format(**locals()) - ) - - def create_module(self, spec): - return self.load_module(spec.name) - - def exec_module(self, module): - pass - - def find_spec(self, fullname, path=None, target=None): - """Return a module spec for vendored names.""" - return ( - importlib.util.spec_from_loader(fullname, self) - if self._module_matches_namespace(fullname) else None - ) - - def install(self): - """ - Install this importer into sys.meta_path if not already present. - """ - if self not in sys.meta_path: - sys.meta_path.append(self) - - -names = ( - 'packaging', 'pyparsing', 'appdirs', 'jaraco', 'importlib_resources', - 'more_itertools', -) -VendorImporter(__name__, names).install() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/cpp.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/cpp.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/ctokens.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/ctokens.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/lex.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/lex.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/yacc.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/yacc.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/ygen.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/ply/ygen.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/LICENSE deleted file mode 100644 index 1bf9852..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following 
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/METADATA
deleted file mode 100644
index 2206ad9..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/METADATA
+++ /dev/null
@@ -1,104 +0,0 @@
-Metadata-Version: 2.1
-Name: pyparsing
-Version: 2.4.7
-Summary: Python parsing module
-Home-page: https://github.com/pyparsing/pyparsing/
-Author: Paul McGuire
-Author-email: ptmcg@users.sourceforge.net
-License: MIT License
-Download-URL: https://pypi.org/project/pyparsing/
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: Information Technology
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
-
-PyParsing -- A Python Parsing Module
-====================================
-
-|Build Status|
-
-Introduction
-============
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions. The pyparsing module provides a library of
-classes that client code uses to construct the grammar directly in
-Python code.
-
-*[Since first writing this description of pyparsing in late 2003, this
-technique for developing parsers has become more widespread, under the
-name Parsing Expression Grammars - PEGs. See more information on PEGs at*
-https://en.wikipedia.org/wiki/Parsing_expression_grammar *.]*
-
-Here is a program to parse ``"Hello, World!"`` (or any greeting of the form
-``"salutation, addressee!"``):
-
-.. code:: python
-
-    from pyparsing import Word, alphas
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-    hello = "Hello, World!"
-    print(hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of '+', '|' and '^' operator
-definitions.
-
-The parsed results returned from ``parseString()`` can be accessed as a
-nested list, a dictionary, or an object with named attributes.
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
-- extra or missing whitespace (the above program will also handle ``"Hello,World!"``, ``"Hello , World !"``, etc.)
-- quoted strings
-- embedded comments
-
-The examples directory includes a simple SQL parser, simple CORBA IDL
-parser, a config file parser, a chemical formula parser, and a four-
-function algebraic notation parser, among many others.
-
-Documentation
-=============
-
-There are many examples in the online docstrings of the classes
-and methods in pyparsing. You can find them compiled into online docs
-at https://pyparsing-docs.readthedocs.io/en/latest/. Additional
-documentation resources and project info are listed in the online
-GitHub wiki, at https://github.com/pyparsing/pyparsing/wiki. An
-entire directory of examples is at
-https://github.com/pyparsing/pyparsing/tree/master/examples.
-
-License
-=======
-
-MIT License. See header of pyparsing.py
-
-History
-=======
-
-See CHANGES file.
-
-.. |Build Status| image:: https://travis-ci.org/pyparsing/pyparsing.svg?branch=master
-   :target: https://travis-ci.org/pyparsing/pyparsing
-
-
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/RECORD
deleted file mode 100644
index dff93c8..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/RECORD
+++ /dev/null
@@ -1,7 +0,0 @@
-pyparsing-2.4.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-pyparsing-2.4.7.dist-info/LICENSE,sha256=ENUSChaAWAT_2otojCIL-06POXQbVzIGBNRVowngGXI,1023
-pyparsing-2.4.7.dist-info/METADATA,sha256=Ry40soZZiZrAkSMQT_KU1_1REe6FKa5UWzbT6YA8Mxs,3636
-pyparsing-2.4.7.dist-info/RECORD,,
-pyparsing-2.4.7.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
-pyparsing-2.4.7.dist-info/top_level.txt,sha256=eUOjGzJVhlQ3WS2rFAy2mN3LX_7FKTM5GSJ04jfnLmU,10
-pyparsing.py,sha256=oxX_ZOz8t-eros-UWY7nJgcdUgD-rQ53Ck0qp7_v3Ig,273365
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/WHEEL
deleted file mode 100644
index ef99c6c..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.34.2)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/top_level.txt
deleted file mode 100644
index 210dfec..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-pyparsing
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing.py
deleted file mode 100755
index 581d5bb..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing.py
+++ /dev/null
@@ -1,7107 +0,0 @@
-# -*- coding: utf-8 -*-
-# module pyparsing.py
module pyparsing.py -# -# Copyright (c) 2003-2019 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars -============================================================================= - -The pyparsing module is an alternative approach to creating and -executing simple grammars, vs. the traditional lex/yacc approach, or the -use of regular expressions. With pyparsing, you don't need to learn -a new syntax for defining grammars or matching expressions - the parsing -module provides a library of classes that you use to construct the -grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -``", !"``), built up using :class:`Word`, -:class:`Literal`, and :class:`And` elements -(the :class:`'+'` operators create :class:`And` expressions, -and the strings are auto-converted to :class:`Literal` expressions):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the -self-explanatory class names, and the use of '+', '|' and '^' operators. - -The :class:`ParseResults` object returned from -:class:`ParserElement.parseString` can be -accessed as a nested list, a dictionary, or an object with named -attributes. - -The pyparsing module handles some of the problems that are typically -vexing when writing text parsers: - - - extra or missing whitespace (the above program will also handle - "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments - - -Getting Started - ------------------ -Visit the classes :class:`ParserElement` and :class:`ParseResults` to -see the base classes that most other pyparsing -classes inherit from. 
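-
-As a quick illustration (a minimal sketch using only names defined in this
-module), quoted strings and comments are handled without any hand-written
-tokenizing code::
-
-    from pyparsing import Word, alphas, quotedString, cppStyleComment
-
-    assignment = Word(alphas) + "=" + quotedString
-    assignment.ignore(cppStyleComment)
-    print(assignment.parseString('greeting = "Hello" // a comment'))
-    # -> ['greeting', '=', '"Hello"']
-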
Use the docstrings for examples of how to: - - - construct literal match expressions from :class:`Literal` and - :class:`CaselessLiteral` classes - - construct character word-group expressions using the :class:`Word` - class - - see how to create repetitive expressions using :class:`ZeroOrMore` - and :class:`OneOrMore` classes - - use :class:`'+'`, :class:`'|'`, :class:`'^'`, - and :class:`'&'` operators to combine simple expressions into - more complex ones - - associate names with your parsed results using - :class:`ParserElement.setResultsName` - - access the parsed data, which is returned as a :class:`ParseResults` - object - - find some helpful expression short-cuts like :class:`delimitedList` - and :class:`oneOf` - - find more useful common expressions in the :class:`pyparsing_common` - namespace class -""" - -__version__ = "2.4.7" -__versionTime__ = "30 Mar 2020 00:43 UTC" -__author__ = "Paul McGuire " - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -import collections -import pprint -import traceback -import types -from datetime import datetime -from operator import itemgetter -import itertools -from functools import wraps -from contextlib import contextmanager - -try: - # Python 3 - from itertools import filterfalse -except ImportError: - from itertools import ifilterfalse as filterfalse - -try: - from _thread import RLock -except ImportError: - from threading import RLock - -try: - # Python 3 - from collections.abc import Iterable - from collections.abc import MutableMapping, Mapping -except ImportError: - # Python 2.7 - from collections import Iterable - from collections import MutableMapping, Mapping - -try: - from collections import OrderedDict as _OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict as _OrderedDict - except ImportError: - _OrderedDict = None - -try: - from types import SimpleNamespace -except ImportError: - class SimpleNamespace: pass - -# version compatibility configuration -__compat__ = SimpleNamespace() -__compat__.__doc__ = """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. 
-
- - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
-   of results names when an And expression is nested within an Or or MatchFirst; set to
-   True to enable bugfix released in pyparsing 2.3.0, or False to preserve
-   pre-2.3.0 handling of named results
-"""
-__compat__.collect_all_And_tokens = True
-
-__diag__ = SimpleNamespace()
-__diag__.__doc__ = """
-Diagnostic configuration (all default to False)
- - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results
-   name is defined on a MatchFirst or Or expression with one or more And subexpressions
-   (only warns if __compat__.collect_all_And_tokens is False)
- - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
-   name is defined on a containing expression with ungrouped subexpressions that also
-   have results names
- - warn_name_set_on_empty_Forward - flag to enable warnings when a Forward is defined
-   with a results name, but has no contents defined
- - warn_on_multiple_string_args_to_oneof - flag to enable warnings when oneOf is
-   incorrectly called with multiple str arguments
- - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
-   calls to ParserElement.setName()
-"""
-__diag__.warn_multiple_tokens_in_named_alternation = False
-__diag__.warn_ungrouped_named_tokens_in_collection = False
-__diag__.warn_name_set_on_empty_Forward = False
-__diag__.warn_on_multiple_string_args_to_oneof = False
-__diag__.enable_debug_on_named_expressions = False
-__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")]
-
-def _enable_all_warnings():
-    __diag__.warn_multiple_tokens_in_named_alternation = True
-    __diag__.warn_ungrouped_named_tokens_in_collection = True
-    __diag__.warn_name_set_on_empty_Forward = True
-    __diag__.warn_on_multiple_string_args_to_oneof = True
-__diag__.enable_all_warnings = _enable_all_warnings
-
-
-__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__',
-           'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
-           'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
-           'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
-           'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
-           'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
-           'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
-           'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',
-           'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
-           'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
-           'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
-           'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
-           'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
-           'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
-           'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
-           'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
-           'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
-           'indentedBlock', 'originalTextFor', 'ungroup',
'infixNotation', 'locatedExpr', 'withClass', - 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', - 'conditionAsParseAction', 're', - ] - -system_version = tuple(sys.version_info)[:3] -PY_3 = system_version[0] == 3 -if PY_3: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - unicode = str - _ustr = str - - # build list of single arg builtins, that can be used as parse actions - singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] - -else: - _MAX_INT = sys.maxint - range = xrange - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode - friendly. It first tries str(obj). If that fails with - a UnicodeEncodeError, then it tries unicode(obj). It then - < returns the unicode object | encodes it with the default - encoding | ... >. - """ - if isinstance(obj, unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. - return str(obj) - - except UnicodeEncodeError: - # Else encode it - ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') - xmlcharref = Regex(r'&#\d+;') - xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) - return xmlcharref.transformString(ret) - - # build list of single arg builtins, tolerant of Python version, that can be used as parse actions - singleArgBuiltins = [] - import __builtin__ - - for fname in "sum len sorted reversed list tuple set any all min max".split(): - try: - singleArgBuiltins.append(getattr(__builtin__, fname)) - except AttributeError: - continue - -_generatorType = type((y for y in range(1))) - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split()) - for from_, to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join(c for c in string.printable if c not in string.whitespace) - - -def conditionAsParseAction(fn, message=None, fatal=False): - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__(self, pstr, loc=0, msg=None, elem=None): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - self.args = (pstr, loc, msg) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - def __getattr__(self, aname): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if aname == 
"lineno": - return lineno(self.loc, self.pstr) - elif aname in ("col", "column"): - return col(self.loc, self.pstr) - elif aname == "line": - return line(self.loc, self.pstr) - else: - raise AttributeError(aname) - - def __str__(self): - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ', found end of text' - else: - foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\') - else: - foundstr = '' - return ("%s%s (at char %d), (line:%d, col:%d)" % - (self.msg, foundstr, self.loc, self.lineno, self.column)) - def __repr__(self): - return _ustr(self) - def markInputline(self, markerString=">!<"): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join((line_str[:line_column], - markerString, line_str[line_column:])) - return line_str.strip() - def __dir__(self): - return "lineno col line".split() + dir(type(self)) - -class ParseException(ParseBaseException): - """ - Exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - - Example:: - - try: - Word(nums).setName("integer").parseString("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.col)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - @staticmethod - def explain(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `setName` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - explain() is only supported under Python 3. 
-        """
-        import inspect
-
-        if depth is None:
-            depth = sys.getrecursionlimit()
-        ret = []
-        if isinstance(exc, ParseBaseException):
-            ret.append(exc.line)
-            ret.append(' ' * (exc.col - 1) + '^')
-        ret.append("{0}: {1}".format(type(exc).__name__, exc))
-
-        if depth > 0:
-            callers = inspect.getinnerframes(exc.__traceback__, context=depth)
-            seen = set()
-            for i, ff in enumerate(callers[-depth:]):
-                frm = ff[0]
-
-                f_self = frm.f_locals.get('self', None)
-                if isinstance(f_self, ParserElement):
-                    if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
-                        continue
-                    if f_self in seen:
-                        continue
-                    seen.add(f_self)
-
-                    self_type = type(f_self)
-                    ret.append("{0}.{1} - {2}".format(self_type.__module__,
-                                                      self_type.__name__,
-                                                      f_self))
-                elif f_self is not None:
-                    self_type = type(f_self)
-                    ret.append("{0}.{1}".format(self_type.__module__,
-                                                self_type.__name__))
-                else:
-                    code = frm.f_code
-                    if code.co_name in ('wrapper', '<module>'):
-                        continue
-
-                    ret.append("{0}".format(code.co_name))
-
-                depth -= 1
-                if not depth:
-                    break
-
-        return '\n'.join(ret)
-
-
-class ParseFatalException(ParseBaseException):
-    """user-throwable exception thrown when inconsistent parse content
-       is found; stops all parsing immediately"""
-    pass
-
-class ParseSyntaxException(ParseFatalException):
-    """just like :class:`ParseFatalException`, but thrown internally
-    when an :class:`ErrorStop` ('-' operator) indicates
-    that parsing is to stop immediately because an unbacktrackable
-    syntax error has been found.
-    """
-    pass
-
-#~ class ReparseException(ParseBaseException):
-    #~ """Experimental class - parse actions can raise this exception to cause
-    #~ pyparsing to reparse the input string:
-    #~ - with a modified input string, and/or
-    #~ - with a modified start location
-    #~ Set the values of the ReparseException in the constructor, and raise the
-    #~ exception in a parse action to cause pyparsing to use the new string/location.
-    #~ Setting the values as None causes no change to be made.
-    #~ """
-    #~ def __init_( self, newstring, restartLoc ):
-        #~ self.newParseText = newstring
-        #~ self.reparseLoc = restartLoc
-
-class RecursiveGrammarException(Exception):
-    """exception thrown by :class:`ParserElement.validate` if the
-    grammar could be improperly recursive
-    """
-    def __init__(self, parseElementList):
-        self.parseElementTrace = parseElementList
-
-    def __str__(self):
-        return "RecursiveGrammarException: %s" % self.parseElementTrace
-
-class _ParseResultsWithOffset(object):
-    def __init__(self, p1, p2):
-        self.tup = (p1, p2)
-    def __getitem__(self, i):
-        return self.tup[i]
-    def __repr__(self):
-        return repr(self.tup[0])
-    def setOffset(self, i):
-        self.tup = (self.tup[0], i)
-
-class ParseResults(object):
-    """Structured parse results, to provide multiple means of access to
-    the parsed data:
-
-    - as a list (``len(results)``)
-    - by list index (``results[0], results[1]``, etc.)
- - by attribute (``results.`` - see :class:`ParserElement.setResultsName`) - - Example:: - - integer = Word(nums) - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - # equivalent form: - # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - # parseString returns a ParseResults object - result = date_str.parseString("1999/12/31") - - def test(s, fn=repr): - print("%s -> %s" % (s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: 31 - - month: 12 - - year: 1999 - """ - def __new__(cls, toklist=None, name=None, asList=True, modal=True): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - self.__asList = asList - self.__modal = modal - if toklist is None: - toklist = [] - if isinstance(toklist, list): - self.__toklist = toklist[:] - elif isinstance(toklist, _generatorType): - self.__toklist = list(toklist) - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name, int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])): - if isinstance(toklist, basestring): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - self[name] = toklist - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self.__tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)] - sub = v - if isinstance(sub, ParseResults): - sub.__parent = wkref(self) - - def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self.__toklist) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self.__tokdict.items(): - for j in removed: - for k, (value, position) in 
enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__(self, k): - return k in self.__tokdict - - def __len__(self): - return len(self.__toklist) - - def __bool__(self): - return (not not self.__toklist) - __nonzero__ = __bool__ - - def __iter__(self): - return iter(self.__toklist) - - def __reversed__(self): - return iter(self.__toklist[::-1]) - - def _iterkeys(self): - if hasattr(self.__tokdict, "iterkeys"): - return self.__tokdict.iterkeys() - else: - return iter(self.__tokdict) - - def _itervalues(self): - return (self[k] for k in self._iterkeys()) - - def _iteritems(self): - return ((k, self[k]) for k in self._iterkeys()) - - if PY_3: - keys = _iterkeys - """Returns an iterator of all named result keys.""" - - values = _itervalues - """Returns an iterator of all named result values.""" - - items = _iteritems - """Returns an iterator of all named result key-value tuples.""" - - else: - iterkeys = _iterkeys - """Returns an iterator of all named result keys (Python 2.x only).""" - - itervalues = _itervalues - """Returns an iterator of all named result values (Python 2.x only).""" - - iteritems = _iteritems - """Returns an iterator of all named result key-value tuples (Python 2.x only).""" - - def keys(self): - """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iterkeys()) - - def values(self): - """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.itervalues()) - - def items(self): - """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iteritems()) - - def haskeys(self): - """Since keys() returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self.__tokdict) - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. 
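-
-        For instance, the ``dict``-style call with a default (a minimal,
-        illustrative sketch)::
-
-            result = Word(alphas)("word").parseString("abc")
-            result.pop("missing", "n/a")  # -> 'n/a' (no such results name)
-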
- - Example:: - - def remove_first(tokens): - tokens.pop(0) - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + OneOrMore(Word(nums)) - print(patt.parseString("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.addParseAction(remove_LABEL) - print(patt.parseString("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: AAB - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == 'default': - args = (args[0], v) - else: - raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) - or len(args) == 1 - or args[0] in self): - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, defaultValue=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``defaultValue`` or ``None`` if no - ``defaultValue`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return defaultValue - - def insert(self, index, insStr): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. - - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] - """ - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name, occurrences in self.__tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def append(self, item): - """ - Add single element to end of ParseResults list of elements. - - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] - """ - self.__toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ParseResults list of elements. 
- - Example:: - - patt = OneOrMore(Word(alphas)) - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self.__toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self.__toklist[:] - self.__tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - return "" - - def __add__(self, other): - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems for v in vlist] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update(other.__accumNames) - return self - - def __radd__(self, other): - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self): - return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict)) - - def __str__(self): - return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' - - def _asStringList(self, sep=''): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(_ustr(item)) - return out - - def asList(self): - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. - - Example:: - - patt = OneOrMore(Word(alphas)) - result = patt.parseString("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] - - # Use asList() to create an actual list - result_list = result.asList() - print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist] - - def asDict(self): - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.asDict() - print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... 
is not JSON serializable - print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - if PY_3: - item_fn = self.items - else: - item_fn = self.iteritems - - def toItem(obj): - if isinstance(obj, ParseResults): - if obj.haskeys(): - return obj.asDict() - else: - return [toItem(v) for v in obj] - else: - return obj - - return dict((k, toItem(v)) for k, v in item_fn()) - - def copy(self): - """ - Returns a new copy of a :class:`ParseResults` object. - """ - ret = ParseResults(self.__toklist) - ret.__tokdict = dict(self.__tokdict.items()) - ret.__parent = self.__parent - ret.__accumNames.update(self.__accumNames) - ret.__name = self.__name - return ret - - def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True): - """ - (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. - """ - nl = "\n" - out = [] - namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() - for v in vlist) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [nl, indent, "<", selfTag, ">"] - - for i, res in enumerate(self.__toklist): - if isinstance(res, ParseResults): - if i in namedItems: - out += [res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - ""] - - out += [nl, indent, ""] - return "".join(out) - - def __lookup(self, sub): - for k, vlist in self.__tokdict.items(): - for v, loc in vlist: - if sub is v: - return k - return None - - def getName(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = OneOrMore(user_data) - - result = user_info.parseString("22 111-22-3333 #221B") - for item in result: - print(item.getName(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 - and len(self.__tokdict) == 1 - and next(iter(self.__tokdict.values()))[0][1] in (0, -1)): - return next(iter(self.__tokdict.keys())) - else: - return None - - def dump(self, indent='', full=True, include_list=True, _depth=0): - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data. 
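-
-        The ``include_list`` argument can be set to False to suppress the
-        leading token-list line (a minimal, illustrative sketch)::
-
-            result = (Word(nums)("a") + Word(nums)("b")).parseString("1 2")
-            print(result.dump(include_list=False))
-            # ->
-            # - a: '1'
-            # - b: '2'
-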
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(result.dump()) - - prints:: - - ['12', '/', '31', '/', '1999'] - - day: 1999 - - month: 31 - - year: 12 - """ - out = [] - NL = '\n' - if include_list: - out.append(indent + _ustr(self.asList())) - else: - out.append('') - - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append("%s%s- %s: " % (indent, (' ' * _depth), k)) - if isinstance(v, ParseResults): - if v: - out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1)) - else: - out.append(_ustr(v)) - else: - out.append(repr(v)) - elif any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent, - (' ' * (_depth)), - i, - indent, - (' ' * (_depth + 1)), - vv.dump(indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1))) - else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent, - (' ' * (_depth)), - i, - indent, - (' ' * (_depth + 1)), - _ustr(vv))) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint `_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint `_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimitedList(term))) - result = func.parseString("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.asList(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return (self.__toklist, - (self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name)) - - def __setstate__(self, state): - self.__toklist = state[0] - self.__tokdict, par, inAccumNames, self.__name = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __getnewargs__(self): - return self.__toklist, self.__name, self.__asList, self.__modal - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None): - """ - Helper classmethod to construct a ParseResults from a dict, preserving the - name-value relations as results names. If an optional 'name' argument is - given, a nested ParseResults will be returned - """ - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - else: - if PY_3: - return not isinstance(obj, (str, bytes)) - else: - return not isinstance(obj, basestring) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - -MutableMapping.register(ParseResults) - -def col (loc, strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. 
See - :class:`ParserElement.parseString` for more - information on parsing strings containing ```` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) - -def lineno(loc, strg): - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`ParserElement.parseString` - for more information on parsing strings containing ```` s, and - suggested methods to maintain a consistent view of the parsed string, the - parse location, and line and column positions within the parsed string. - """ - return strg.count("\n", 0, loc) + 1 - -def line(loc, strg): - """Returns the line of text containing loc within a string, counting newlines as line separators. - """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR + 1:nextCR] - else: - return strg[lastCR + 1:] - -def _defaultStartDebugAction(instring, loc, expr): - print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring)))) - -def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks): - print("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction(instring, loc, expr, exc): - print("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -# Only works on Python 3.x - nonlocal is toxic to Python 2 installs -#~ 'decorator to trim function calls to match the arity of the target' -#~ def _trim_arity(func, maxargs=3): - #~ if func in singleArgBuiltins: - #~ return lambda s,l,t: func(t) - #~ limit = 0 - #~ foundArity = False - #~ def wrapper(*args): - #~ nonlocal limit,foundArity - #~ while 1: - #~ try: - #~ ret = func(*args[limit:]) - #~ foundArity = True - #~ return ret - #~ except TypeError: - #~ if limit == maxargs or foundArity: - #~ raise - #~ limit += 1 - #~ continue - #~ return wrapper - -# this version is Python 2.x-3.x cross-compatible -'decorator to trim function calls to match the arity of the target' -def _trim_arity(func, maxargs=2): - if func in singleArgBuiltins: - return lambda s, l, t: func(t) - limit = [0] - foundArity = [False] - - # traceback return data structure changed in Py3.5 - normalize back to plain tuples - if system_version[:2] >= (3, 5): - def extract_stack(limit=0): - # special handling for Python 3.5.0 - extra deep call stack by 1 - offset = -3 if system_version == (3, 5, 0) else -2 - frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset] - return [frame_summary[:2]] - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - else: - extract_stack = traceback.extract_stack - extract_tb = traceback.extract_tb - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - LINE_DIFF = 6 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
- this_line = extract_stack(limit=2)[-1] - pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF) - - def wrapper(*args): - while 1: - try: - ret = func(*args[limit[0]:]) - foundArity[0] = True - return ret - except TypeError: - # re-raise TypeErrors if they did not come from our arity testing - if foundArity[0]: - raise - else: - try: - tb = sys.exc_info()[-1] - if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: - raise - finally: - try: - del tb - except NameError: - pass - - if limit[0] <= maxargs: - limit[0] += 1 - continue - raise - - # copy func name to wrapper for sensible debug output - func_name = "" - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - wrapper.__name__ = func_name - - return wrapper - - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - @staticmethod - def setDefaultWhitespaceChars(chars): - r""" - Overrides the default whitespace chars - - Example:: - - # default whitespace chars are space, and newline - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.setDefaultWhitespaceChars(" \t") - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - @staticmethod - def inlineLiteralsUsing(cls): - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inlineLiteralsUsing(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - @classmethod - def _trim_traceback(cls, tb): - while tb.tb_next: - tb = tb.tb_next - return tb - - def __init__(self, savelist=False): - self.parseAction = list() - self.failAction = None - # ~ self.name = "" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = (None, None, None) # custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy(self): - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName(self, name): - """ - Define name for this expression, makes debugging and exception messages clearer. - - Example:: - - Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) - Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.name = name - self.errmsg = "Expected " + self.name - if __diag__.enable_debug_on_named_expressions: - self.setDebug() - return self - - def setResultsName(self, name, listAllMatches=False): - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.setResultsName("name")`` - - see :class:`__call__`. - - Example:: - - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self, breakFlag=True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``breakFlag`` to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def setParseAction(self, *fns, **kwargs): - """ - Define one or more actions to perform when successfully matching parse element definition. 
-        Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` ,
-        ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
-
-        - s   = the original string being parsed (see note below)
-        - loc = the location of the matching substring
-        - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
-
-        If the functions in fns modify the tokens, they can return them as the return
-        value from fn, and the modified list of tokens will replace the original.
-        Otherwise, fn does not need to return any value.
-
-        If None is passed as the parse action, all previously added parse actions for this
-        expression are cleared.
-
-        Optional keyword arguments:
-        - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing
-
-        Note: the default parsing behavior is to expand tabs in the input string
-        before starting the parsing process.  See :class:`parseString` for more
-        information on parsing strings containing ``<TAB>`` s, and suggested
-        methods to maintain a consistent view of the parsed string, the parse
-        location, and line and column positions within the parsed string.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer + '/' + integer + '/' + integer
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-            # use parse action to convert to ints at parse time
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            date_str = integer + '/' + integer + '/' + integer
-
-            # note that integer fields are now ints, not strings
-            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
-        """
-        if list(fns) == [None,]:
-            self.parseAction = []
-        else:
-            if not all(callable(fn) for fn in fns):
-                raise TypeError("parse actions must be callable")
-            self.parseAction = list(map(_trim_arity, list(fns)))
-            self.callDuringTry = kwargs.get("callDuringTry", False)
-        return self
-
-    def addParseAction(self, *fns, **kwargs):
-        """
-        Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
-
-        See examples in :class:`copy`.
-        """
-        self.parseAction += list(map(_trim_arity, list(fns)))
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def addCondition(self, *fns, **kwargs):
-        """Add a boolean predicate function to expression's list of parse actions. See
-        :class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
-        functions passed to ``addCondition`` need to return boolean success/fail of the condition.
-
-        Optional keyword arguments:
-        - message = define a custom message to be used in the raised exception
-        - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
-
-        Example::
-
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            year_int = integer.copy()
-            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
-            date_str = year_int + '/' + integer + '/' + integer
-
-            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
-        """
-        for fn in fns:
-            self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),
-                                                           fatal=kwargs.get('fatal', False)))
-
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def setFailAction(self, fn):
-        """Define action to perform if parsing fails at this expression.
-        Fail action fn is a callable function that takes the arguments
-        ``fn(s, loc, expr, err)`` where:
-        - s = string being parsed
-        - loc = location where expression match was attempted and failed
-        - expr = the parse expression that failed
-        - err = the exception thrown
-        The function returns no value.  It may throw :class:`ParseFatalException`
-        if it is desired to stop parsing immediately."""
-        self.failAction = fn
-        return self
-
-    def _skipIgnorables(self, instring, loc):
-        exprsFound = True
-        while exprsFound:
-            exprsFound = False
-            for e in self.ignoreExprs:
-                try:
-                    while 1:
-                        loc, dummy = e._parse(instring, loc)
-                        exprsFound = True
-                except ParseException:
-                    pass
-        return loc
-
-    def preParse(self, instring, loc):
-        if self.ignoreExprs:
-            loc = self._skipIgnorables(instring, loc)
-
-        if self.skipWhitespace:
-            wt = self.whiteChars
-            instrlen = len(instring)
-            while loc < instrlen and instring[loc] in wt:
-                loc += 1
-
-        return loc
-
-    def parseImpl(self, instring, loc, doActions=True):
-        return loc, []
-
-    def postParse(self, instring, loc, tokenlist):
-        return tokenlist
-
-    # ~ @profile
-    def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
-        TRY, MATCH, FAIL = 0, 1, 2
-        debugging = (self.debug)  # and doActions)
-
-        if debugging or self.failAction:
-            # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))
-            if self.debugActions[TRY]:
-                self.debugActions[TRY](instring, loc, self)
-            try:
-                if callPreParse and self.callPreparse:
-                    preloc = self.preParse(instring, loc)
-                else:
-                    preloc = loc
-                tokensStart = preloc
-                if self.mayIndexError or preloc >= len(instring):
-                    try:
-                        loc, tokens = self.parseImpl(instring, preloc, doActions)
-                    except IndexError:
-                        raise ParseException(instring, len(instring), self.errmsg, self)
-                else:
-                    loc, tokens = self.parseImpl(instring, preloc, doActions)
-            except Exception as err:
-                # ~ print ("Exception raised:", err)
-                if self.debugActions[FAIL]:
-                    self.debugActions[FAIL](instring, tokensStart, self, err)
-                if self.failAction:
-                    self.failAction(instring, tokensStart, self, err)
-                raise
-        else:
-            if callPreParse and self.callPreparse:
-                preloc = self.preParse(instring, loc)
-            else:
-                preloc = loc
-            tokensStart = preloc
-            if self.mayIndexError or preloc >= len(instring):
-                try:
-                    loc, tokens = self.parseImpl(instring, preloc, doActions)
-                except IndexError:
-                    raise ParseException(instring, len(instring), self.errmsg, self)
-            else:
-                loc, tokens = self.parseImpl(instring, preloc, doActions)
-
-        tokens = self.postParse(instring, loc, tokens)
-
-        retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
-        if self.parseAction and (doActions or self.callDuringTry):
-            if debugging:
-                try:
-                    for fn in self.parseAction:
-                        try:
-                            tokens = fn(instring, tokensStart, retTokens)
-                        except IndexError as parse_action_exc:
-                            exc = ParseException("exception raised in parse action")
-                            exc.__cause__ = parse_action_exc
-                            raise exc
-
-                        if tokens is not None and tokens is not retTokens:
-                            retTokens = ParseResults(tokens,
-                                                     self.resultsName,
-                                                     asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
-                                                     modal=self.modalResults)
-                except Exception as err:
-                    # ~ print "Exception raised in user parse action:", err
-                    if self.debugActions[FAIL]:
-                        self.debugActions[FAIL](instring, tokensStart, self, err)
-                    raise
-            else:
-                for fn in self.parseAction:
-                    try:
-                        tokens = fn(instring, tokensStart, retTokens)
-                    except IndexError as parse_action_exc:
-                        exc =
ParseException("exception raised in parse action") - exc.__cause__ = parse_action_exc - raise exc - - if tokens is not None and tokens is not retTokens: - retTokens = ParseResults(tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults) - if debugging: - # ~ print ("Matched", self, "->", retTokens.asList()) - if self.debugActions[MATCH]: - self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens) - - return loc, retTokens - - def tryParse(self, instring, loc): - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - raise ParseException(instring, loc, self.errmsg, self) - - def canParseNext(self, instring, loc): - try: - self.tryParse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - class _UnboundedCache(object): - def __init__(self): - cache = {} - self.not_in_cache = not_in_cache = object() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - - def clear(self): - cache.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - if _OrderedDict is not None: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = _OrderedDict() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - while len(cache) > size: - try: - cache.popitem(False) - except KeyError: - pass - - def clear(self): - cache.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - else: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = {} - key_fifo = collections.deque([], size) - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - while len(key_fifo) > size: - cache.pop(key_fifo.popleft(), None) - key_fifo.append(key) - - def clear(self): - cache.clear() - key_fifo.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache(self, instring, loc, doActions=True, callPreParse=True): - HIT, MISS = 0, 1 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the 
exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy())) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if isinstance(value, Exception): - raise value - return value[0], value[1].copy() - - _parse = _parseNoCache - - @staticmethod - def resetCache(): - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) - - _packratEnabled = False - @staticmethod - def enablePackrat(cache_size_limit=128): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enablePackrat`. - For best results, call ``enablePackrat()`` immediately after - importing pyparsing. - - Example:: - - import pyparsing - pyparsing.ParserElement.enablePackrat() - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = ParserElement._UnboundedCache() - else: - ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parseString(self, instring, parseAll=False): - """ - Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - Returns the parsed data as a :class:`ParseResults` object, which may be - accessed as a list, or as a dict or object with attributes if the given parser - includes results names. - - If you want the grammar to require that the entire input string be - successfully parsed, then set ``parseAll`` to True (equivalent to ending - the grammar with ``StringEnd()``). - - Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string, - in order to report proper column numbers in parse actions. 
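An aside before the tab-handling note continues: as described in ``enablePackrat`` above, memoization is an explicit opt-in. A minimal sketch, assuming only a stock pyparsing install::

    import pyparsing

    # call once, immediately after import, before building any grammars
    pyparsing.ParserElement.enablePackrat()

Because both successes and parse exceptions are cached, parse actions with side-effects may fire fewer times than they would without packrat enabled.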
-        If the input string contains tabs and
-        the grammar uses parse actions that use the ``loc`` argument to index into the
-        string being parsed, you can ensure you have a consistent view of the input
-        string by:
-
-        - calling ``parseWithTabs`` on your grammar before calling ``parseString``
-          (see :class:`parseWithTabs`)
-        - define your parse action using the full ``(s, loc, toks)`` signature, and
-          reference the input string using the parse action's ``s`` argument
-        - explicitly expand the tabs in your input string before calling
-          ``parseString``
-
-        Example::
-
-            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
-            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
-        """
-        ParserElement.resetCache()
-        if not self.streamlined:
-            self.streamline()
-            # ~ self.saveAsList = True
-        for e in self.ignoreExprs:
-            e.streamline()
-        if not self.keepTabs:
-            instring = instring.expandtabs()
-        try:
-            loc, tokens = self._parse(instring, 0)
-            if parseAll:
-                loc = self.preParse(instring, loc)
-                se = Empty() + StringEnd()
-                se._parse(instring, loc)
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-        else:
-            return tokens
-
-    def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
-        """
-        Scan the input string for expression matches.  Each match will return the
-        matching tokens, start location, and end location.  May be called with optional
-        ``maxMatches`` argument, to clip scanning after 'n' matches are found.  If
-        ``overlap`` is specified, then overlapping matches will be reported.
-
-        Note that the start and end locations are reported relative to the string
-        being parsed.  See :class:`parseString` for more information on parsing
-        strings with embedded tabs.
-
-        Example::
-
-            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
-            print(source)
-            for tokens, start, end in Word(alphas).scanString(source):
-                print(' '*start + '^'*(end-start))
-                print(' '*start + tokens[0])
-
-        prints::
-
-            sldjf123lsdjjkf345sldkjf879lkjsfd987
-            ^^^^^
-            sldjf
-                    ^^^^^^^
-                    lsdjjkf
-                              ^^^^^^
-                              sldkjf
-                                       ^^^^^^
-                                       lkjsfd
-        """
-        if not self.streamlined:
-            self.streamline()
-        for e in self.ignoreExprs:
-            e.streamline()
-
-        if not self.keepTabs:
-            instring = _ustr(instring).expandtabs()
-        instrlen = len(instring)
-        loc = 0
-        preparseFn = self.preParse
-        parseFn = self._parse
-        ParserElement.resetCache()
-        matches = 0
-        try:
-            while loc <= instrlen and matches < maxMatches:
-                try:
-                    preloc = preparseFn(instring, loc)
-                    nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
-                except ParseException:
-                    loc = preloc + 1
-                else:
-                    if nextLoc > loc:
-                        matches += 1
-                        yield tokens, preloc, nextLoc
-                        if overlap:
-                            nextloc = preparseFn(instring, loc)
-                            if nextloc > loc:
-                                loc = nextLoc
-                            else:
-                                loc += 1
-                        else:
-                            loc = nextLoc
-                    else:
-                        loc = preloc + 1
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-
-    def transformString(self, instring):
-        """
-        Extension to :class:`scanString`, to modify matching text with modified tokens that may
-        be returned from a parse action.  To use ``transformString``, define a grammar and
-        attach a parse action to it that modifies the returned token list.
-        Invoking ``transformString()`` on a target string will then scan for matches,
-        and replace the matched text patterns according to the logic in the parse
-        action.  ``transformString()`` returns the resulting transformed string.
-
-        Example::
-
-            wd = Word(alphas)
-            wd.setParseAction(lambda toks: toks[0].title())
-
-            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
-
-        prints::
-
-            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
-        """
-        out = []
-        lastE = 0
-        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
-        # keep string locs straight between transformString and scanString
-        self.keepTabs = True
-        try:
-            for t, s, e in self.scanString(instring):
-                out.append(instring[lastE:s])
-                if t:
-                    if isinstance(t, ParseResults):
-                        out += t.asList()
-                    elif isinstance(t, list):
-                        out += t
-                    else:
-                        out.append(t)
-                lastE = e
-            out.append(instring[lastE:])
-            out = [o for o in out if o]
-            return "".join(map(_ustr, _flatten(out)))
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-
-    def searchString(self, instring, maxMatches=_MAX_INT):
-        """
-        Another extension to :class:`scanString`, simplifying the access to the tokens found
-        to match the given parse expression.  May be called with optional
-        ``maxMatches`` argument, to clip searching after 'n' matches are found.
-
-        Example::
-
-            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
-            cap_word = Word(alphas.upper(), alphas.lower())
-
-            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
-
-            # the sum() builtin can be used to merge results into a single ParseResults object
-            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
-
-        prints::
-
-            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
-            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
-        """
-        try:
-            return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                if getattr(exc, '__traceback__', None) is not None:
-                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)
-                raise exc
-
-    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
-        """
-        Generator method to split a string using the given expression as a separator.
-        May be called with optional ``maxsplit`` argument, to limit the number of splits;
-        and the optional ``includeSeparators`` argument (default= ``False``), if the separating
-        matching text should be included in the split results.
-
-        Example::
-
-            punc = oneOf(list(".,;:/-!?"))
-            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
-
-        prints::
-
-            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
-        """
-        splits = 0
-        last = 0
-        for t, s, e in self.scanString(instring, maxMatches=maxsplit):
-            yield instring[last:s]
-            if includeSeparators:
-                yield t[0]
-            last = e
-        yield instring[last:]
-
-    def __add__(self, other):
-        """
-        Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
-        converts them to :class:`Literal`s by default.
-
-        Example::
-
-            greet = Word(alphas) + "," + Word(alphas) + "!"
-            hello = "Hello, World!"
-            print(hello, "->", greet.parseString(hello))
-
-        prints::
-
-            Hello, World! -> ['Hello', ',', 'World', '!']
-
-        ``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
-
-            Literal('start') + ... + Literal('end')
-
-        is equivalent to:
-
-            Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
-
-        Note that the skipped text is returned with '_skipped' as a results name,
-        and to support having multiple skips in the same parser, the value returned is
-        a list of all skipped text.
-        """
-        if other is Ellipsis:
-            return _PendingSkip(self)
-
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return And([self, other])
-
-    def __radd__(self, other):
-        """
-        Implementation of + operator when left operand is not a :class:`ParserElement`
-        """
-        if other is Ellipsis:
-            return SkipTo(self)("_skipped*") + self
-
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return other + self
-
-    def __sub__(self, other):
-        """
-        Implementation of - operator, returns :class:`And` with error stop
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return self + And._ErrorStop() + other
-
-    def __rsub__(self, other):
-        """
-        Implementation of - operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, basestring):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                          SyntaxWarning, stacklevel=2)
-            return None
-        return other - self
-
-    def __mul__(self, other):
-        """
-        Implementation of * operator, allows use of ``expr * 3`` in place of
-        ``expr + expr + expr``.  Expressions may also be multiplied by a 2-integer
-        tuple, similar to ``{min, max}`` multipliers in regular expressions.
Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0, ) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0, 0)") - - if optElements: - def makeOptionalList(n): - if n > 1: - return Optional(self + makeOptionalList(n - 1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other): - """ - Implementation of | operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst([self, other]) - - def __ror__(self, other): - """ - Implementation of | operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other): - """ - Implementation of ^ operator - returns :class:`Or` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with 
ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or([self, other]) - - def __rxor__(self, other): - """ - Implementation of ^ operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other): - """ - Implementation of & operator - returns :class:`Each` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each([self, other]) - - def __rand__(self, other): - """ - Implementation of & operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__(self): - """ - Implementation of ~ operator - returns :class:`NotAny` - """ - return NotAny(self) - - def __iter__(self): - # must implement __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - raise TypeError('%r object is not iterable' % self.__class__.__name__) - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5], - '... [{0}]'.format(len(key)) - if len(key) > 5 else '')) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name=None): - """ - Shortcut for :class:`setResultsName`, with ``listAllMatches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self): - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. 
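For instance, a sketch of ``suppress`` in use (illustrative only; ``Word``, ``alphas`` and ``Suppress`` are the names defined in this module)::

    pair = Word(alphas) + Suppress(',') + Word(alphas)
    print(pair.parseString("ham, eggs"))   # -> ['ham', 'eggs']; the ',' is matched but dropped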
-        """
-        return Suppress(self)
-
-    def leaveWhitespace(self):
-        """
-        Disables the skipping of whitespace before matching the characters in the
-        :class:`ParserElement`'s defined pattern.  This is normally only used internally by
-        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-        """
-        self.skipWhitespace = False
-        return self
-
-    def setWhitespaceChars(self, chars):
-        """
-        Overrides the default whitespace chars
-        """
-        self.skipWhitespace = True
-        self.whiteChars = chars
-        self.copyDefaultWhiteChars = False
-        return self
-
-    def parseWithTabs(self):
-        """
-        Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.
-        Must be called before ``parseString`` when the input grammar contains elements that
-        match ``<TAB>`` characters.
-        """
-        self.keepTabs = True
-        return self
-
-    def ignore(self, other):
-        """
-        Define expression to be ignored (e.g., comments) while doing pattern
-        matching; may be called repeatedly, to define multiple comment or other
-        ignorable patterns.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-            patt.parseString('ablaj /* comment */ lskjd')  # -> ['ablaj']
-
-            patt.ignore(cStyleComment)
-            patt.parseString('ablaj /* comment */ lskjd')  # -> ['ablaj', 'lskjd']
-        """
-        if isinstance(other, basestring):
-            other = Suppress(other)
-
-        if isinstance(other, Suppress):
-            if other not in self.ignoreExprs:
-                self.ignoreExprs.append(other)
-        else:
-            self.ignoreExprs.append(Suppress(other.copy()))
-        return self
-
-    def setDebugActions(self, startAction, successAction, exceptionAction):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        """
-        self.debugActions = (startAction or _defaultStartDebugAction,
-                             successAction or _defaultSuccessDebugAction,
-                             exceptionAction or _defaultExceptionDebugAction)
-        self.debug = True
-        return self
-
-    def setDebug(self, flag=True):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        Set ``flag`` to True to enable, False to disable.
-
-        Example::
-
-            wd = Word(alphas).setName("alphaword")
-            integer = Word(nums).setName("numword")
-            term = wd | integer
-
-            # turn on debugging for wd
-            wd.setDebug()
-
-            OneOrMore(term).parseString("abc 123 xyz 890")
-
-        prints::
-
-            Match alphaword at loc 0(1,1)
-            Matched alphaword -> ['abc']
-            Match alphaword at loc 3(1,4)
-            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
-            Match alphaword at loc 7(1,8)
-            Matched alphaword -> ['xyz']
-            Match alphaword at loc 11(1,12)
-            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
-            Match alphaword at loc 15(1,16)
-            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
-        The output shown is that produced by the default debug actions - custom debug actions can be
-        specified using :class:`setDebugActions`. Prior to attempting
-        to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
-        is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
-        message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,
-        which makes debugging and exception messages easier to understand - for instance, the default
-        name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.
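A short sketch of the whitespace-control methods above (illustrative only)::

    # leaveWhitespace() forbids the usual whitespace skipping before a match
    value = Word(nums) + Word(alphas).leaveWhitespace()
    print(value.parseString("10kb"))   # -> ['10', 'kb']
    # value.parseString("10 kb") would raise ParseException: the space is not skipped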
- """ - if flag: - self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction) - else: - self.debug = False - return self - - def __str__(self): - return self.name - - def __repr__(self): - return _ustr(self) - - def streamline(self): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion(self, parseElementList): - pass - - def validate(self, validateTrace=None): - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self.checkRecursion([]) - - def parseFile(self, file_or_filename, parseAll=False): - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r") as f: - file_contents = f.read() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, basestring): - return self.matches(other) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return id(self) - - def __req__(self, other): - return self == other - - def __rne__(self, other): - return not (self == other) - - def matches(self, testString, parseAll=True): - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - testString - to test against this expression for a match - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - try: - self.parseString(_ustr(testString), parseAll=parseAll) - return True - except ParseBaseException: - return False - - def runTests(self, tests, parseAll=True, comment='#', - fullDump=True, printResults=True, failureTests=False, postParse=None, - file=None): - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - comment - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - printResults - (default= ``True``) prints test output to stdout - - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing - - postParse - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - file - (default=``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failureTests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.runTests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.runTests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failureTests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading 'r'.) 
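The ``postParse`` hook described above can be sketched as follows (``annotate`` is a hypothetical callback name, illustrative only)::

    def annotate(test_string, result):
        # whatever string is returned here is appended to that test's output
        return "token count: {0}".format(len(result))

    Word(nums).runTests('''
        100
        42
        ''', postParse=annotate)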
- """ - if isinstance(tests, basestring): - tests = list(map(str.strip, tests.rstrip().splitlines())) - if isinstance(comment, basestring): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - allResults = [] - comments = [] - success = True - NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString) - BOM = u'\ufeff' - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append(t) - continue - if not t: - continue - out = ['\n' + '\n'.join(comments) if comments else '', t] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transformString(t.lstrip(BOM)) - result = self.parseString(t, parseAll=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - if '\n' in t: - out.append(line(pe.loc, t)) - out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal) - else: - out.append(' ' * pe.loc + '^' + fatal) - out.append("FAIL: " + str(pe)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: " + str(exc)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) - else: - out.append(result.dump(full=fullDump)) - - if printResults: - if fullDump: - out.append('') - print_('\n'.join(out)) - - allResults.append((t, result)) - - return success, allResults - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr, must_skip=False): - super(_PendingSkip, self).__init__() - self.strRepr = str(expr + Empty()).replace('Empty', '...') - self.name = self.strRepr - self.anchor = expr - self.must_skip = must_skip - - def __add__(self, other): - skipper = SkipTo(other).setName("...")("_skipped*") - if self.must_skip: - def must_skip(t): - if not t._skipped or t._skipped.asList() == ['']: - del t[0] - t.pop("_skipped", None) - def show_skip(t): - if t._skipped.asList()[-1:] == ['']: - skipped = t.pop('_skipped') - t['_skipped'] = 'missing <' + repr(self.anchor) + '>' - return (self.anchor + skipper().addParseAction(must_skip) - | skipper().addParseAction(show_skip)) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.strRepr - - def parseImpl(self, *args): - raise Exception("use of `...` expression without following SkipTo target expression") - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - def __init__(self): - super(Token, self).__init__(savelist=False) - - -class Empty(Token): - """An empty token, will always match. - """ - def __init__(self): - super(Empty, self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match. 
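Though rarely used directly, ``NoMatch`` works as a neutral starting element when OR-ing alternatives together in a loop; a minimal sketch (illustrative only)::

    import functools

    keywords = ["GET", "PUT", "POST"]
    any_keyword = functools.reduce(lambda a, b: a | b,
                                   (Keyword(kw) for kw in keywords),
                                   NoMatch())
    print(any_keyword.parseString("PUT"))   # -> ['PUT']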
- """ - def __init__(self): - super(NoMatch, self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """Token to exactly match a specified string. - - Example:: - - Literal('blah').parseString('blah') # -> ['blah'] - Literal('blah').parseString('blahfooblah') # -> ['blah'] - Literal('blah').parseString('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - def __init__(self, matchString): - super(Literal, self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - -_L = Literal -ParserElement._literalStringClass = Literal - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parseString("start") # -> ['start'] - Keyword("start").parseString("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
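The word-boundary difference called out above, shown side by side (illustrative)::

    print(Literal("if").searchString("ifAndOnlyIf"))   # -> [['if']] (matches inside the longer word)
    print(Keyword("if").searchString("ifAndOnlyIf"))   # -> [] (no word break after 'if')
    print(Keyword("if").searchString("if x == 1"))     # -> [['if']]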
- """ - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__(self, matchString, identChars=None, caseless=False): - super(Keyword, self).__init__() - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl(self, instring, loc, doActions=True): - if self.caseless: - if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch) - and (loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars) - and (loc == 0 - or instring[loc - 1].upper() not in self.identChars)): - return loc + self.matchLen, self.match - - else: - if instring[loc] == self.firstMatchChar: - if ((self.matchLen == 1 or instring.startswith(self.match, loc)) - and (loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars) - and (loc == 0 or instring[loc - 1] not in self.identChars)): - return loc + self.matchLen, self.match - - raise ParseException(instring, loc, self.errmsg, self) - - def copy(self): - c = super(Keyword, self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - @staticmethod - def setDefaultKeywordChars(chars): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - def __init__(self, matchString): - super(CaselessLiteral, self).__init__(matchString.upper()) - # Preserve the defining literal. - self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc:loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - def __init__(self, matchString, identChars=None): - super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True) - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. 
- :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``maxMismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) - patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - def __init__(self, match_string, maxMismatches=1): - super(CloseMatch, self).__init__() - self.name = match_string - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) - self.mayIndexError = False - self.mayReturnEmpty = False - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)): - src, mat = s_m - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = match_stringloc + 1 - results = ParseResults([instring[start:loc]]) - results['original'] = match_string - results['mismatches'] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, an - optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. An optional ``excludeChars`` parameter can - list characters that might be found in the input ``bodyChars`` - string; useful to define a word of all printables except for one or - two characters, for instance. - - :class:`srange` is useful for defining custom character set strings - for defining ``Word`` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. 
To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, excludeChars=",") - """ - def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None): - super(Word, self).__init__() - if excludeChars: - excludeChars = set(excludeChars) - initChars = ''.join(c for c in initChars if c not in excludeChars) - if bodyChars: - bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars: - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except Exception: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if (start > 0 and instring[start - 1] in bodychars - or loc < instrlen and instring[loc] in bodychars): - throwException = True - - if throwException: - raise 
ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-    def __str__(self):
-        try:
-            return super(Word, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-
-            def charsAsStr(s):
-                if len(s) > 4:
-                    return s[:4] + "..."
-                else:
-                    return s
-
-            if self.initCharsOrig != self.bodyCharsOrig:
-                self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))
-            else:
-                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
-
-        return self.strRepr
-
-class _WordRegex(Word):
-    def parseImpl(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        return loc, result.group()
-
-
-class Char(_WordRegex):
-    """A short-cut class for defining ``Word(characters, exact=1)``,
-    when defining a match of any single character in a string of
-    characters.
-    """
-    def __init__(self, charset, asKeyword=False, excludeChars=None):
-        super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)
-        self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars))
-        if asKeyword:
-            self.reString = r"\b%s\b" % self.reString
-        self.re = re.compile(self.reString)
-        self.re_match = self.re.match
-
-
-class Regex(Token):
-    r"""Token for matching strings that match a given regular
-    expression. Defined with string specifying the regular expression in
-    a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
-    If the given regex contains named groups (defined using ``(?P<name>...)``),
-    these will be preserved as named parse results.
-
-    If instead of the Python stdlib re module you wish to use a different RE module
-    (such as the `regex` module), you can replace it by either building your
-    Regex object with a compiled RE that was compiled using regex:
-
-    Example::
-
-        realnum = Regex(r"[+-]?\d+\.\d*")
-        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-
-        # use regex module instead of stdlib re module to construct a Regex using
-        # a compiled regular expression
-        import regex
-        parser = pp.Regex(regex.compile(r'[0-9]'))
-
-    """
-    def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):
-        """The parameters ``pattern`` and ``flags`` are passed
-        to the ``re.compile()`` function as-is. See the Python
-        `re module <https://docs.python.org/3/library/re.html>`_ module for an
-        explanation of the acceptable patterns and flags.
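Named groups become named results, so fields can be read back by name; a small sketch (illustrative only)::

    date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
    result = date.parseString("1999-12-31")
    print(result['year'], result['month'], result['day'])   # -> 1999 12 31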
- """ - super(Regex, self).__init__() - - if isinstance(pattern, basestring): - if not pattern: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'): - self.re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError("Regex may only be constructed with a string or a compiled RE object") - - self.re_match = self.re.match - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = self.re_match("") is not None - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def __str__(self): - try: - return super(Regex, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - def sub(self, repl): - r""" - Return Regex with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) `_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") - print(make_html.transformString("h1:main title:")) - # prints "

<h1>main title</h1>
" - """ - if self.asGroupList: - warnings.warn("cannot use sub() with Regex(asGroupList=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch and callable(repl): - warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch: - def pa(tokens): - return tokens[0].expand(repl) - else: - def pa(tokens): - return self.re.sub(repl, tokens[0]) - return self.addParseAction(pa) - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. - - Defined with the following parameters: - - - quoteChar - string of one or more characters defining the - quote delimiting string - - escChar - character to escape quotes, typically backslash - (default= ``None``) - - escQuote - special quote sequence to escape an embedded quote - string (such as SQL's ``""`` to escape an embedded ``"``) - (default= ``None``) - - multiline - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - unquoteResults - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - endQuoteChar - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quoteChar) - - convertWhitespaceEscapes - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.searchString('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', endQuoteChar='}}') - print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', escQuote='""') - print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, - unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString, self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if not quoteChar: - warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '')) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '')) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar) 
- 1, 0, -1)) + ')') - - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen: -self.endQuoteCharLen] - - if isinstance(ret, basestring): - # replace escaped whitespace - if '\\' in ret and self.convertWhitespaceEscapes: - ws_map = { - r'\t': '\t', - r'\n': '\n', - r'\f': '\f', - r'\r': '\r', - } - for wslit, wschar in ws_map.items(): - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def __str__(self): - try: - return super(QuotedString, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. 
- - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - def __init__(self, notChars, min=1, max=0, exact=0): - super(CharsNotIn, self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use " - "Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = (self.minLen == 0) - self.mayIndexError = False - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] in self.notChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__(self): - try: - return super(CharsNotIn, self).__str__() - except Exception: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. 
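Since the class ships without a usage example, a minimal sketch (illustrative only)::

    # make line breaks significant instead of silently skipped
    line_item = Word(alphas) + White("\n").suppress()
    print(OneOrMore(line_item).parseString("ham\neggs\n"))   # -> ['ham', 'eggs']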
- """ - whiteStrs = { - ' ' : '', - '\t': '', - '\n': '', - '\r': '', - '\f': '', - u'\u00A0': '', - u'\u1680': '', - u'\u180E': '', - u'\u2000': '', - u'\u2001': '', - u'\u2002': '', - u'\u2003': '', - u'\u2004': '', - u'\u2005': '', - u'\u2006': '', - u'\u2007': '', - u'\u2008': '', - u'\u2009': '', - u'\u200A': '', - u'\u200B': '', - u'\u202F': '', - u'\u205F': '', - u'\u3000': '', - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White, self).__init__() - self.matchWhite = ws - self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite)) - # ~ self.leaveWhitespace() - self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__(self): - super(_PositionToken, self).__init__() - self.name = self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. - """ - def __init__(self, colno): - super(GoToColumn, self).__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col: - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc: newloc] - return newloc, ret - - -class LineStart(_PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).searchString(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - def __init__(self): - super(LineStart, self).__init__() - self.errmsg = "Expected start of line" - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - -class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - def __init__(self): - super(LineEnd, self).__init__() - self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", "")) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - 
else: - raise ParseException(instring, loc, self.errmsg, self) - -class StringStart(_PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - def __init__(self): - super(StringStart, self).__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class StringEnd(_PositionToken): - """Matches if current position is at the end of the parse string - """ - def __init__(self): - super(StringEnd, self).__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, - and is not preceded by any character in a given set of - ``wordChars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. - """ - def __init__(self, wordChars=printables): - super(WordStart, self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if (instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and is - not followed by any character in a given set of ``wordChars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - def __init__(self, wordChars=printables): - super(WordEnd, self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if (instring[loc] in self.wordChars or - instring[loc - 1] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. 
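``WordStart``/``WordEnd`` above are zero-width position checks; a minimal sketch of the ``\b``-style matching they enable, assuming a pyparsing 2.4.x-style API::

    from pyparsing import Word, WordStart, WordEnd, alphanums, nums

    # match digit runs only as standalone words, like regex \b\d+\b;
    # the '123' inside 'abc123' fails the WordStart check
    standalone_int = WordStart(alphanums) + Word(nums) + WordEnd(alphanums)
    print(standalone_int.searchString("42 abc123 7"))  # -> [['42'], ['7']]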
- """ - def __init__(self, exprs, savelist=False): - super(ParseExpression, self).__init__(savelist) - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, basestring): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, basestring) for expr in exprs): - exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def append(self, other): - self.exprs.append(other) - self.strRepr = None - return self - - def leaveWhitespace(self): - """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on - all contained expressions.""" - self.skipWhitespace = False - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leaveWhitespace() - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super(ParseExpression, self).ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super(ParseExpression, self).ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def __str__(self): - try: - return super(ParseExpression, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs)) - return self.strRepr - - def streamline(self): - super(ParseExpression, self).streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d) - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for Or's and MatchFirst's) - if len(self.exprs) == 2: - other = self.exprs[0] - if (isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug): - self.exprs = other.exprs[:] + [self.exprs[1]] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if (isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug): - self.exprs = self.exprs[:-1] + other.exprs[:] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + _ustr(self) - - return self - - def validate(self, validateTrace=None): - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self.checkRecursion([]) - - def copy(self): - ret = super(ParseExpression, self).copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_ungrouped_named_tokens_in_collection: - for e in self.exprs: - if isinstance(e, ParserElement) and e.resultsName: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName), - stacklevel=3) - - return super(ParseExpression, self)._setResultsName(name, listAllMatches) - - -class And(ParseExpression): - """ - Requires all given 
:class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super(And._ErrorStop, self).__init__(*args, **kwargs) - self.name = '-' - self.leaveWhitespace() - - def __init__(self, exprs, savelist=True): - exprs = list(exprs) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception("cannot construct And with sequence ending in ...") - else: - tmp.append(expr) - exprs[:] = tmp - super(And, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars(self.exprs[0].whiteChars) - self.skipWhitespace = self.exprs[0].skipWhitespace - self.callPreparse = True - - def streamline(self): - # collapse any _PendingSkip's - if self.exprs: - if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1]): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if (isinstance(e, ParseExpression) - and e.exprs and isinstance(e.exprs[-1], _PendingSkip)): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super(And, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as last arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False) - errorStop = False - for e in self.exprs[1:]: - if isinstance(e, And._ErrorStop): - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException(instring, len(instring), self.errmsg, self) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. 
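The ``'-'`` operator described above builds an ``And`` containing an ``_ErrorStop``, so a failure past that point raises ``ParseSyntaxException`` instead of backtracking to other alternatives; a minimal sketch, assuming a pyparsing 2.4.x-style API::

    from pyparsing import Literal, Word, nums, ParseSyntaxException

    # after '(' is seen, anything but an integer is a hard, non-backtrackable error
    paren_int = Literal("(") - Word(nums) + Literal(")")
    try:
        paren_int.parseString("(abc)")
    except ParseSyntaxException as err:
        print("hard failure:", err)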
- - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(Or, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(Or, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - for e in self.exprs: - try: - loc2 = e.tryParse(instring, loc) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. - matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - - def __ixor__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(Or, self)._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. 
If - two expressions match, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums) - print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(MatchFirst, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(MatchFirst, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse(instring, loc, doActions) - return ret - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(MatchFirst, self)._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
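Because ``Or`` re-tries every alternative and keeps the longest match while ``MatchFirst`` stops at the first, the same alternatives can behave differently under ``'^'`` and ``'|'``; a minimal sketch, assuming a pyparsing 2.4.x-style API::

    from pyparsing import Combine, Word, nums

    integer = Word(nums)
    real = Combine(Word(nums) + "." + Word(nums))

    print((integer ^ real).searchString("3.1416"))  # Or: longest wins -> [['3.1416']]
    print((integer | real).searchString("3.1416"))  # MatchFirst: integer wins -> [['3'], ['1416']]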
- - Example:: - - color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) - - shape_spec.runTests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - def __init__(self, exprs, savelist=True): - super(Each, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self): - super(Each, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional)) - opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)] - opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))] - self.optionals = opt1 + opt2 - self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] - self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] - self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse(instring, tmpLoc) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - resultlist.append(results) - - finalResults = sum(resultlist, 
ParseResults([])) - return loc, finalResults - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - def __init__(self, expr, savelist=False): - super(ParseElementEnhance, self).__init__(savelist) - if isinstance(expr, basestring): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars(expr.whiteChars) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException("", loc, self.errmsg, self) - - def leaveWhitespace(self): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self): - super(ParseElementEnhance, self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr.checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - try: - return super(ParseElementEnhance, self).__str__() - except Exception: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr)) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. 
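``Each`` (the ``'&'`` operator above) requires every piece but accepts them in any input order; a minimal sketch with illustrative field names, assuming a pyparsing 2.4.x-style API::

    from pyparsing import Word, alphas, nums

    shape = "shape:" + Word(alphas)("shape")
    size = "size:" + Word(nums)("size")

    spec = shape & size  # both required, either order accepted
    result = spec.parseString("size: 50 shape: CIRCLE")
    print(result.shape, result.size)  # -> CIRCLE 50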
- - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - def __init__(self, expr): - super(FollowedBy, self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, Literal, Keyword, or - a Word or CharsNotIn with a specified exact or maximum length, then - the retreat parameter is not required. Otherwise, retreat must be - specified to give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
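As the ``parseImpl`` comment above notes, ``FollowedBy`` consumes no input but keeps any results names defined in the lookahead; a minimal sketch (the name ``next_word`` is illustrative), assuming a pyparsing 2.4.x-style API::

    from pyparsing import FollowedBy, Word, alphas

    word = Word(alphas)
    peek = FollowedBy(word("next_word"))  # zero-width, but the name survives
    result = (word + peek).parseString("hello world")
    print(result[0], "->", result.next_word)  # -> hello -> world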
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - def __init__(self, expr, retreat=None): - super(PrecededBy, self).__init__(expr) - self.expr = self.expr().leaveWhitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, _PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat):loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1)+1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the '~' operator. - - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Optional(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infixNotation - boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) - - # integers that are followed by "." 
are actually floats - integer = Word(nums) + ~Char(".") - """ - def __init__(self, expr): - super(NotAny, self).__init__(expr) - # ~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.canParseNext(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - -class _MultipleMatch(ParseElementEnhance): - def __init__(self, expr, stopOn=None): - super(_MultipleMatch, self).__init__(expr) - self.saveAsList = True - ender = stopOn - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender): - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False) - try: - hasIgnoreExprs = (not not self.ignoreExprs) - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_ungrouped_named_tokens_in_collection: - for e in [self.expr] + getattr(self.expr, 'exprs', []): - if isinstance(e, ParserElement) and e.resultsName: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName), - stacklevel=3) - - return super(_MultipleMatch, self)._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """Repetition of one or more of the given expression. - - Parameters: - - expr - expression that must match one or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stopOn attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parseString(text).pprint() - """ - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - -class ZeroOrMore(_MultipleMatch): - """Optional repetition of zero or more of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example: similar to :class:`OneOrMore` - """ - def __init__(self, expr, stopOn=None): - super(ZeroOrMore, self).__init__(expr, stopOn=stopOn) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - default (optional) - value to be returned if the optional expression is not found. - - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) - zip.runTests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - __optionalNotMatched = _NullToken() - - def __init__(self, expr, default=__optionalNotMatched): - super(Optional, self).__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - if self.defaultValue is not self.__optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([self.defaultValue]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [self.defaultValue] - else: - tokens = [] - return loc, tokens - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched - expression is found. 
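``Optional``'s ``default`` argument, handled in its ``parseImpl`` above, supplies a stand-in token when the wrapped expression is absent; a minimal sketch with illustrative names, assuming a pyparsing 2.4.x-style API::

    from pyparsing import Optional, Suppress, Word, alphas, nums

    host = Word(alphas + ".")("host")
    port = Optional(Suppress(":") + Word(nums), default="80")("port")
    addr = host + port

    print(addr.parseString("example.com:8080").port)  # -> 8080
    print(addr.parseString("example.com").port)       # -> 80 (the default)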
- - Parameters: - - expr - target expression marking the end of the data to be skipped - - include - (default= ``False``) if True, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element list). - - ignore - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - failOn - (default= ``None``) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, - the SkipTo is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quotedString) - string_data.setParseAction(tokenMap(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.searchString(report): - print tkt.dump() - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: 6 - - desc: Intermittent system crash - - issue_num: 101 - - sev: Critical - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: 14 - - desc: Spelling error on Login ('log|n') - - issue_num: 94 - - sev: Cosmetic - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: 47 - - desc: System slow when running too many reports - - issue_num: 79 - - sev: Minor - """ - def __init__(self, other, include=False, ignore=None, failOn=None): - super(SkipTo, self).__init__(other) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, basestring): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - expr = self.expr - expr_parse = self.expr._parse - self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None - self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return 
values - loc = tmploc - skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the '<<' operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, '|' has a lower precedence than '<<', so that:: - - fwdExpr << a | b | c - - will actually be evaluated as:: - - (fwdExpr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwdExpr << (a | b | c) - - Converting to use the '<<=' operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - def __init__(self, other=None): - super(Forward, self).__init__(other, savelist=False) - - def __lshift__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - self.expr = other - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars(self.expr.whiteChars) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return self - - def __ilshift__(self, other): - return self << other - - def leaveWhitespace(self): - self.skipWhitespace = False - return self - - def streamline(self): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - if self.strRepr is not None: - return self.strRepr - - # Avoid infinite recursion by setting a temporary strRepr - self.strRepr = ": ..." - - # Use the string representation of main expression. - retString = '...' - try: - if self.expr is not None: - retString = _ustr(self.expr)[:1000] - else: - retString = "None" - finally: - self.strRepr = self.__class__.__name__ + ": " + retString - return self.strRepr - - def copy(self): - if self.expr is not None: - return super(Forward, self).copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_name_set_on_empty_Forward: - if self.expr is None: - warnings.warn("{0}: setting results name {0!r} on {1} expression " - "that has no contained expression".format("warn_name_set_on_empty_Forward", - name, - type(self).__name__), - stacklevel=3) - - return super(Forward, self)._setResultsName(name, listAllMatches) - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results. 
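``Forward`` above is what makes recursive grammars expressible; a minimal nested-list sketch using the recommended ``<<=`` form, assuming a pyparsing 2.4.x-style API::

    from pyparsing import Forward, Group, Suppress, Word, ZeroOrMore, nums

    expr = Forward()
    nested = Group(Suppress("(") + ZeroOrMore(expr) + Suppress(")"))
    expr <<= Word(nums) | nested  # expr is defined in terms of itself

    print(expr.parseString("(1 (2 3) 4)"))  # -> [['1', ['2', '3'], '4']]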
- """ - def __init__(self, expr, savelist=False): - super(TokenConverter, self).__init__(expr) # , savelist) - self.saveAsList = False - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parseString('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parseString('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) - """ - def __init__(self, expr, joinString="", adjacent=True): - super(Combine, self).__init__(expr) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super(Combine, self).ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']] - """ - def __init__(self, expr): - super(Group, self).__init__(expr) - self.saveAsList = True - - def postParse(self, instring, loc, tokenlist): - return [tokenlist] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. 
- 
-     Example::
- 
-         data_word = Word(alphas)
-         label = data_word + FollowedBy(':')
-         attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
- 
-         text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-         attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
- 
-         # print attributes as plain groups
-         print(OneOrMore(attr_expr).parseString(text).dump())
- 
-         # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
-         result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
-         print(result.dump())
- 
-         # access named fields as dict entries, or output as dict
-         print(result['shape'])
-         print(result.asDict())
- 
-     prints::
- 
-         ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-         [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-         - color: light blue
-         - posn: upper left
-         - shape: SQUARE
-         - texture: burlap
-         SQUARE
-         {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
- 
-     See more examples at :class:`ParseResults` of accessing fields by results name.
-     """
-     def __init__(self, expr):
-         super(Dict, self).__init__(expr)
-         self.saveAsList = True
- 
-     def postParse(self, instring, loc, tokenlist):
-         for i, tok in enumerate(tokenlist):
-             if len(tok) == 0:
-                 continue
-             ikey = tok[0]
-             if isinstance(ikey, int):
-                 ikey = _ustr(tok[0]).strip()
-             if len(tok) == 1:
-                 tokenlist[ikey] = _ParseResultsWithOffset("", i)
-             elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
-                 tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
-             else:
-                 dictvalue = tok.copy()  # ParseResults(i)
-                 del dictvalue[0]
-                 if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()):
-                     tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
-                 else:
-                     tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
- 
-         if self.resultsName:
-             return [tokenlist]
-         else:
-             return tokenlist
- 
- 
- class Suppress(TokenConverter):
-     """Converter for ignoring the results of a parsed expression.
- 
-     Example::
- 
-         source = "a, b, c,d"
-         wd = Word(alphas)
-         wd_list1 = wd + ZeroOrMore(',' + wd)
-         print(wd_list1.parseString(source))
- 
-         # often, delimiters that are useful during parsing are just in the
-         # way afterward - use Suppress to keep them out of the parsed output
-         wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
-         print(wd_list2.parseString(source))
- 
-     prints::
- 
-         ['a', ',', 'b', ',', 'c', ',', 'd']
-         ['a', 'b', 'c', 'd']
- 
-     (See also :class:`delimitedList`.)
-     """
-     def postParse(self, instring, loc, tokenlist):
-         return []
- 
-     def suppress(self):
-         return self
- 
- 
- class OnlyOnce(object):
-     """Wrapper for parse actions, to ensure they are only called once.
-     """
-     def __init__(self, methodCall):
-         self.callable = _trim_arity(methodCall)
-         self.called = False
-     def __call__(self, s, l, t):
-         if not self.called:
-             results = self.callable(s, l, t)
-             self.called = True
-             return results
-         raise ParseException(s, l, "")
-     def reset(self):
-         self.called = False
- 
- def traceParseAction(f):
-     """Decorator for debugging parse actions.
- 
-     When the parse action is called, this decorator will print
-     ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
-     When the parse action completes, the decorator will print
-     ``"<<"`` followed by the returned value, or any exception that the parse action raised.
- 
-     Example::
- 
-         wd = Word(alphas)
- 
-         @traceParseAction
-         def remove_duplicate_chars(tokens):
-             return ''.join(sorted(set(''.join(tokens))))
- 
-         wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
-         print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
- 
-     prints::
- 
-         >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-         <<leaving remove_duplicate_chars (ret: 'dfjkls')
-         ['dfjkls']
-     """
-     f = _trim_arity(f)
-     def z(*paArgs):
-         thisFunc = f.__name__
-         s, l, t = paArgs[-3:]
-         if len(paArgs) > 3:
-             thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
-         sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t))
-         try:
-             ret = f(*paArgs)
-         except Exception as exc:
-             sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
-             raise
-         sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
-         return ret
-     try:
-         z.__name__ = f.__name__
-     except AttributeError:
-         pass
-     return z
- 
- #
- # global helpers
- #
- def delimitedList(expr, delim=",", combine=False):
-     """Helper to define a delimited list of expressions - the delimiter
-     defaults to ','. By default, the list elements and delimiters can
-     have intervening whitespace, and comments, but this can be
-     overridden by passing 'combine=True' in the constructor. If
-     ``combine`` is set to ``True``, the matching tokens are
-     returned as a single token string, with the delimiters included;
-     otherwise, the matching tokens are returned as a list of tokens,
-     with the delimiters suppressed.
- 
-     Example::
- 
-         delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
-         delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
-     """
-     dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
-     if combine:
-         return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
-     else:
-         return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
- 
- def countedArray(expr, intExpr=None):
-     """Helper to define a counted list of expressions.
- 
-     This helper defines a pattern of the form::
- 
-         integer expr expr expr...
- 
-     where the leading integer tells how many expr expressions follow.
-     The matched tokens returns the array of expr tokens as a list - the
-     leading count token is suppressed.
- 
-     If ``intExpr`` is specified, it should be a pyparsing expression
-     that produces an integer value.
- 
-     Example::
- 
-         countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
- 
-         # in this parser, the leading integer value is given in binary,
-         # '10' indicating that 2 values are in the array
-         binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
-         countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
-     """
-     arrayExpr = Forward()
-     def countFieldParseAction(s, l, t):
-         n = t[0]
-         arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
-         return []
-     if intExpr is None:
-         intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
-     else:
-         intExpr = intExpr.copy()
-     intExpr.setName("arrayLen")
-     intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
-     return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...')
- 
- def _flatten(L):
-     ret = []
-     for i in L:
-         if isinstance(i, list):
-             ret.extend(_flatten(i))
-         else:
-             ret.append(i)
-     return ret
- 
- def matchPreviousLiteral(expr):
-     """Helper to define an expression that is indirectly defined from
-     the tokens matched in a previous expression, that is, it looks for
-     a 'repeat' of a previous expression. For example::
- 
-         first = Word(nums)
-         second = matchPreviousLiteral(first)
-         matchExpr = first + ":" + second
- 
-     will match ``"1:1"``, but not ``"1:2"``. Because this
-     matches a previous literal, will also match the leading
-     ``"1:1"`` in ``"1:10"``. If this is not desired, use
-     :class:`matchPreviousExpr`. Do *not* use with packrat parsing
-     enabled.
-     """
-     rep = Forward()
-     def copyTokenToRepeater(s, l, t):
-         if t:
-             if len(t) == 1:
-                 rep << t[0]
-             else:
-                 # flatten t tokens
-                 tflat = _flatten(t.asList())
-                 rep << And(Literal(tt) for tt in tflat)
-         else:
-             rep << Empty()
-     expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-     rep.setName('(prev) ' + _ustr(expr))
-     return rep
- 
- def matchPreviousExpr(expr):
-     """Helper to define an expression that is indirectly defined from
-     the tokens matched in a previous expression, that is, it looks for
-     a 'repeat' of a previous expression.
For example:: - - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - def copyTokenToRepeater(s, l, t): - matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s, l, t): - theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException('', 0, '') - rep.setParseAction(mustMatchTheseTokens, callDuringTry=True) - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def _escapeRegexRangeChars(s): - # ~ escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return _ustr(s) - -def oneOf(strs, caseless=False, useRegex=True, asKeyword=False): - """Helper to quickly define a set of alternative Literals, and makes - sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. - - Parameters: - - - strs - a string of space-delimited literals, or a collection of - string literals - - caseless - (default= ``False``) - treat all literals as - caseless - - useRegex - (default= ``True``) - as an optimization, will - generate a Regex object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - - asKeyword - (default=``False``) - enforce Keyword-style matching on the - generated expressions - - Example:: - - comp_oper = oneOf("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - if isinstance(caseless, basestring): - warnings.warn("More than one string argument passed to oneOf, pass " - "choices as a list or space-delimited string", stacklevel=2) - - if caseless: - isequal = (lambda a, b: a.upper() == b.upper()) - masks = (lambda a, b: b.upper().startswith(a.upper())) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = (lambda a, b: a == b) - masks = (lambda a, b: b.startswith(a)) - parseElementClass = Keyword if asKeyword else Literal - - symbols = [] - if isinstance(strs, basestring): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) - if not symbols: - return NoMatch() - - if not asKeyword: - # if not producing keywords, need to reorder to take care to avoid masking - # longer choices with shorter ones - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1:]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if not (caseless or asKeyword) and useRegex: - # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols])) - try: - if len(symbols) == len("".join(symbols)): - return 
-                return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols))
-            else:
-                return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols))
-        except Exception:
-            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
-                          SyntaxWarning, stacklevel=2)
-
-    # last resort, just use MatchFirst
-    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
-
-def dictOf(key, value):
-    """Helper to easily and clearly define a dictionary by specifying
-    the respective patterns for the key and value.  Takes care of
-    defining the :class:`Dict`, :class:`ZeroOrMore`, and
-    :class:`Group` tokens in the proper order.  The key pattern
-    can include delimiting markers or punctuation, as long as they are
-    suppressed, thereby leaving the significant key text.  The value
-    pattern can include named results, so that the :class:`Dict` results
-    can include named token fields.
-
-    Example::
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        print(OneOrMore(attr_expr).parseString(text).dump())
-
-        attr_label = label
-        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
-
-        # similar to Dict, but simpler call format
-        result = dictOf(attr_label, attr_value).parseString(text)
-        print(result.dump())
-        print(result['shape'])
-        print(result.shape)  # object attribute access works too
-        print(result.asDict())
-
-    prints::
-
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        SQUARE
-        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
-    """
-    return Dict(OneOrMore(Group(key + value)))
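The dictOf docstring above leaves ``label`` and ``data_word`` undefined; a runnable sketch that fills them in, assuming the pyparsing 2.4.x API vendored here::

    from pyparsing import Word, alphas, Suppress, OneOrMore, FollowedBy, dictOf

    text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
    data_word = Word(alphas)
    label = data_word + FollowedBy(':')   # a key is a word immediately before ':'
    attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

    result = dictOf(label, attr_value).parseString(text)
    print(result['shape'])    # SQUARE
    print(result.asDict())    # all four attributes as a plain dict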
-
-def originalTextFor(expr, asString=True):
-    """Helper to return the original, untokenized text for a given
-    expression.  Useful to restore the parsed fields of an HTML start
-    tag into the raw tag text itself, or to revert separate tokens with
-    intervening whitespace back to the original matching input text. By
-    default, returns astring containing the original parsed text.
-
-    If the optional ``asString`` argument is passed as
-    ``False``, then the return value is
-    a :class:`ParseResults` containing any results names that
-    were originally matched, and a single token containing the original
-    matched text from the input string.  So if the expression passed to
-    :class:`originalTextFor` contains expressions with defined
-    results names, you must set ``asString`` to ``False`` if you
-    want to preserve those results name values.
-
-    Example::
-
-        src = "this is test <b> bold <i>text</i> </b> normal text "
-        for tag in ("b", "i"):
-            opener, closer = makeHTMLTags(tag)
-            patt = originalTextFor(opener + SkipTo(closer) + closer)
-            print(patt.searchString(src)[0])
-
-    prints::
-
-        ['<b> bold <i>text</i> </b>']
-        ['<i>text</i>']
-    """
-    locMarker = Empty().setParseAction(lambda s, loc, t: loc)
-    endlocMarker = locMarker.copy()
-    endlocMarker.callPreparse = False
-    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
-    if asString:
-        extractText = lambda s, l, t: s[t._original_start: t._original_end]
-    else:
-        def extractText(s, l, t):
-            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
-    matchExpr.setParseAction(extractText)
-    matchExpr.ignoreExprs = expr.ignoreExprs
-    return matchExpr
-
-def ungroup(expr):
-    """Helper to undo pyparsing's default grouping of And expressions,
-    even if all but one are non-empty.
-    """
-    return TokenConverter(expr).addParseAction(lambda t: t[0])
-
-def locatedExpr(expr):
-    """Helper to decorate a returned token with its starting and ending
-    locations in the input string.
-
-    This helper adds the following results names:
-
-     - locn_start = location where matched expression begins
-     - locn_end = location where matched expression ends
-     - value = the actual parsed results
-
-    Be careful if the input text contains ``<TAB>`` characters, you
-    may want to call :class:`ParserElement.parseWithTabs`
-
-    Example::
-
-        wd = Word(alphas)
-        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
-            print(match)
-
-    prints::
-
-        [[0, 'ljsdf', 5]]
-        [[8, 'lksdjjf', 15]]
-        [[18, 'lkkjj', 23]]
-    """
-    locator = Empty().setParseAction(lambda s, l, t: l)
-    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
-
-
-# convenience constants for positional expressions
-empty = Empty().setName("empty")
-lineStart = LineStart().setName("lineStart")
-lineEnd = LineEnd().setName("lineEnd")
-stringStart = StringStart().setName("stringStart")
-stringEnd = StringEnd().setName("stringEnd")
-
-_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1])
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16)))
-_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8)))
-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]"
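originalTextFor and locatedExpr pair naturally: one recovers the raw matched slice, the other records where the match sat. A short sketch, assuming pyparsing 2.4.x; the input strings are made up::

    from pyparsing import Word, alphas, SkipTo, makeHTMLTags, originalTextFor, locatedExpr

    # locatedExpr: each hit carries locn_start / value / locn_end results names
    wd = Word(alphas)
    for match in locatedExpr(wd).searchString("ljsdf123lksdjjf"):
        print(match[0].locn_start, match[0].value, match[0].locn_end)

    # originalTextFor: give back the exact input slice, tags and all
    b, b_end = makeHTMLTags("b")
    raw = originalTextFor(b + SkipTo(b_end) + b_end)
    print(raw.searchString("before <b>bold text</b> after")[0][0])   # <b>bold text</b>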
-
-def srange(s):
-    r"""Helper to easily define string ranges for use in Word
-    construction.  Borrows syntax from regexp '[]' string range
-    definitions::
-
-        srange("[0-9]") -> "0123456789"
-        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
-        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-
-    The input string must be enclosed in []'s, and the returned string
-    is the expanded character set joined into a single string. The
-    values enclosed in the []'s may be:
-
-     - a single character
-     - an escaped character with a leading backslash (such as ``\-``
-       or ``\]``)
-     - an escaped hex character with a leading ``'\x'``
-       (``\x21``, which is a ``'!'`` character) (``\0x##``
-       is also supported for backwards compatibility)
-     - an escaped octal character with a leading ``'\0'``
-       (``\041``, which is a ``'!'`` character)
-     - a range of any of the above, separated by a dash (``'a-z'``,
-       etc.)
-     - any combination of the above (``'aeiouy'``,
-       ``'a-zA-Z0-9_$'``, etc.)
-    """
-    _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
-    try:
-        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
-    except Exception:
-        return ""
-
-def matchOnlyAtCol(n):
-    """Helper method for defining parse actions that require matching at
-    a specific column in the input text.
-    """
-    def verifyCol(strg, locn, toks):
-        if col(locn, strg) != n:
-            raise ParseException(strg, locn, "matched token not at column %d" % n)
-    return verifyCol
-
-def replaceWith(replStr):
-    """Helper method for common parse actions that simply return
-    a literal value.  Especially useful when used with
-    :class:`transformString<ParserElement.transformString>` ().
-
-    Example::
-
-        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
-        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
-        term = na | num
-
-        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s, l, t: [replStr]
-
-def removeQuotes(s, l, t):
-    """Helper parse action for removing quotation marks from parsed
-    quoted strings.
-
-    Example::
-
-        # by default, quotation marks are included in parsed results
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use removeQuotes to strip quotation marks from parsed results
-        quotedString.setParseAction(removeQuotes)
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
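replaceWith and removeQuotes are both tiny parse actions; a combined sketch, assuming pyparsing 2.4.x (``math.nan`` needs Python 3.5+), with results per the docstrings::

    import math
    from pyparsing import Word, nums, oneOf, OneOrMore, quotedString, replaceWith, removeQuotes

    # replaceWith: substitute a fixed value for whatever matched
    num = Word(nums).setParseAction(lambda toks: int(toks[0]))
    na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
    print(OneOrMore(na | num).parseString("324 234 N/A 234"))   # [324, 234, nan, 234]

    # removeQuotes: strip the enclosing quote characters
    qs = quotedString.copy().setParseAction(removeQuotes)
    print(qs.parseString("'Now is the Winter'"))                # ['Now is the Winter']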
-
-def tokenMap(func, *args):
-    """Helper to define a parse action by mapping a function to all
-    elements of a ParseResults list. If any additional args are passed,
-    they are forwarded to the given function as additional arguments
-    after the token, as in
-    ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
-    which will convert the parsed data to an integer using base 16.
-
-    Example (compare the last to example in :class:`ParserElement.transformString`::
-
-        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
-        hex_ints.runTests('''
-            00 11 22 aa FF 0a 0d 1a
-            ''')
-
-        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
-        OneOrMore(upperword).runTests('''
-            my kingdom for a horse
-            ''')
-
-        wd = Word(alphas).setParseAction(tokenMap(str.title))
-        OneOrMore(wd).setParseAction(' '.join).runTests('''
-            now is the winter of our discontent made glorious summer by this sun of york
-            ''')
-
-    prints::
-
-        00 11 22 aa FF 0a 0d 1a
-        [0, 17, 34, 170, 255, 10, 13, 26]
-
-        my kingdom for a horse
-        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
-        now is the winter of our discontent made glorious summer by this sun of york
-        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
-    """
-    def pa(s, l, t):
-        return [func(tokn, *args) for tokn in t]
-
-    try:
-        func_name = getattr(func, '__name__',
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    pa.__name__ = func_name
-
-    return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case.
-Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case.
-Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
-
-def _makeTags(tagStr, xml,
-              suppress_LT=Suppress("<"),
-              suppress_GT=Suppress(">")):
-    """Internal helper to construct opening and closing tag expressions, given a tag name"""
-    if isinstance(tagStr, basestring):
-        resname = tagStr
-        tagStr = Keyword(tagStr, caseless=not xml)
-    else:
-        resname = tagStr.name
-
-    tagAttrName = Word(alphas, alphanums + "_-:")
-    if xml:
-        tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
-        openTag = (suppress_LT
-                   + tagStr("tag")
-                   + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
-                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
-                   + suppress_GT)
-    else:
-        tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">")
-        openTag = (suppress_LT
-                   + tagStr("tag")
-                   + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
-                                           + Optional(Suppress("=") + tagAttrValue))))
-                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
-                   + suppress_GT)
-    closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
-
-    openTag.setName("<%s>" % resname)
-    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
-    openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy()))
-    closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname)
-    openTag.tag = resname
-    closeTag.tag = resname
-    openTag.tag_body = SkipTo(closeTag())
-    return openTag, closeTag
-
-def makeHTMLTags(tagStr):
-    """Helper to construct opening and closing tag expressions for HTML,
-    given a tag name. Matches tags in either upper or lower case,
-    attributes with namespaces and with quoted or unquoted values.
-
-    Example::
-
-        text = '<a href="https://github.com/pyparsing/pyparsing/wiki">More info at the pyparsing wiki page</a>'
-        # makeHTMLTags returns pyparsing expressions for the opening and
-        # closing tags as a 2-tuple
-        a, a_end = makeHTMLTags("A")
-        link_expr = a + SkipTo(a_end)("link_text") + a_end
-
-        for link in link_expr.searchString(text):
-            # attributes in the <A> tag (like "href" shown here) are
-            # also accessible as named results
-            print(link.link_text, '->', link.href)
-
-    prints::
-
-        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
-    """
-    return _makeTags(tagStr, False)
-
-def makeXMLTags(tagStr):
-    """Helper to construct opening and closing tag expressions for XML,
-    given a tag name. Matches tags only in the given upper/lower case.
-
-    Example: similar to :class:`makeHTMLTags`
-    """
-    return _makeTags(tagStr, True)
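One HTML-specific behavior worth noting: unlike the XML flavor, the HTML tag expressions also accept unquoted attribute values and mixed-case tag names. A small sketch, assuming pyparsing 2.4.x; the sample markup is invented::

    from pyparsing import makeHTMLTags

    text = '<IMG SRC=thumbs/dog.jpg alt="pup"> and <img src=thumbs/cat.jpg>'
    img, img_end = makeHTMLTags("img")
    for hit in img.searchString(text):
        # attribute names are lower-cased into results names
        print(hit.src, repr(hit.alt))   # thumbs/dog.jpg 'pup', then thumbs/cat.jpg ''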
-
-def withAttribute(*args, **attrDict):
-    """Helper to create a validating parse action to be used with start
-    tags created with :class:`makeXMLTags` or
-    :class:`makeHTMLTags`. Use ``withAttribute`` to qualify
-    a starting tag with a required attribute value, to avoid false
-    matches on common tags such as ``<TD>`` or ``<DIV>``.
-
-    Call ``withAttribute`` with a series of attribute names and
-    values. Specify the list of filter attributes names and values as:
-
-     - keyword arguments, as in ``(align="right")``, or
-     - as an explicit dict with ``**`` operator, when an attribute
-       name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
-     - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
-    For attribute names with a namespace prefix, you must use the second
-    form.  Attribute names are matched insensitive to upper/lower case.
-
-    If just testing for ``class`` (with or without a namespace), use
-    :class:`withClass`.
-
-    To verify that the attribute exists, but without specifying a value,
-    pass ``withAttribute.ANY_VALUE`` as the value.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0 </div>
-            <div type="graph">1,3 2,3 1,1 </div>
-            <div>this has no type</div>
-            </div>
-
-        '''
-        div,div_end = makeHTMLTags("div")
-
-        # only match div tag having a type attribute with value "grid"
-        div_grid = div().setParseAction(withAttribute(type="grid"))
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.searchString(html):
-            print(grid_header.body)
-
-        # construct a match with any div tag having a type attribute, regardless of the value
-        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.searchString(html):
-            print(div_header.body)
-
-    prints::
-
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    if args:
-        attrs = args[:]
-    else:
-        attrs = attrDict.items()
-    attrs = [(k, v) for k, v in attrs]
-    def pa(s, l, tokens):
-        for attrName, attrValue in attrs:
-            if attrName not in tokens:
-                raise ParseException(s, l, "no matching attribute " + attrName)
-            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
-                raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" %
-                                     (attrName, tokens[attrName], attrValue))
-    return pa
-withAttribute.ANY_VALUE = object()
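The dict and tuple calling forms listed in the docstring get no demo there; a minimal sketch of the ``**`` form, needed because ``class`` is a Python keyword, assuming pyparsing 2.4.x and an invented table row::

    from pyparsing import makeHTMLTags, SkipTo, withAttribute

    html = '<td class="Customer" align="right">37.50</td>'
    td, td_end = makeHTMLTags("td")
    # "class" cannot be a keyword argument, so pass it via an unpacked dict
    td_cust = td().setParseAction(withAttribute(**{"class": "Customer", "align": "right"}))
    for hit in (td_cust + SkipTo(td_end)("amount") + td_end).searchString(html):
        print(hit.amount)   # 37.50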
-
-def withClass(classname, namespace=''):
-    """Simplified version of :class:`withAttribute` when
-    matching on a div class - made difficult because ``class`` is
-    a reserved word in Python.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0 </div>
-            <div class="graph">1,3 2,3 1,1 </div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
-
-        '''
-        div,div_end = makeHTMLTags("div")
-        div_grid = div().setParseAction(withClass("grid"))
-
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.searchString(html):
-            print(grid_header.body)
-
-        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.searchString(html):
-            print(div_header.body)
-
-    prints::
-
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    classattr = "%s:class" % namespace if namespace else "class"
-    return withAttribute(**{classattr: classname})
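As the last line above shows, withClass is just sugar over the dict form of withAttribute; the two parse actions in this sketch behave identically (assuming pyparsing 2.4.x)::

    from pyparsing import makeHTMLTags, withAttribute, withClass

    div, _ = makeHTMLTags("div")
    by_class = div().setParseAction(withClass("grid"))
    by_attr = div().setParseAction(withAttribute(**{"class": "grid"}))
    # both reject any <div> whose class attribute is not "grid"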
-
-opAssoc = SimpleNamespace()
-opAssoc.LEFT = object()
-opAssoc.RIGHT = object()
-
-def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
-    """Helper method for constructing grammars of expressions made up of
-    operators working in a precedence hierarchy.  Operators may be unary
-    or binary, left- or right-associative.  Parse actions can also be
-    attached to operator expressions. The generated parser will also
-    recognize the use of parentheses to override operator precedences
-    (see example below).
-
-    Note: if you define a deep operator list, you may see performance
-    issues when using infixNotation. See
-    :class:`ParserElement.enablePackrat` for a mechanism to potentially
-    improve your parser performance.
-
-    Parameters:
-     - baseExpr - expression representing the most basic element for the
-       nested
-     - opList - list of tuples, one for each operator precedence level
-       in the expression grammar; each tuple is of the form ``(opExpr,
-       numTerms, rightLeftAssoc, parseAction)``, where:
-
-       - opExpr is the pyparsing expression for the operator; may also
-         be a string, which will be converted to a Literal; if numTerms
-         is 3, opExpr is a tuple of two expressions, for the two
-         operators separating the 3 terms
-       - numTerms is the number of terms for this operator (must be 1,
-         2, or 3)
-       - rightLeftAssoc is the indicator whether the operator is right
-         or left associative, using the pyparsing-defined constants
-         ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
- - parseAction is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``setParseAction(*fn)`` - (:class:`ParserElement.setParseAction`) - - lpar - expression for matching left-parentheses - (default= ``Suppress('(')``) - - rpar - expression for matching right-parentheses - (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infixNotation(integer | varname, - [ - ('-', 1, opAssoc.RIGHT), - (oneOf('* /'), 2, opAssoc.LEFT), - (oneOf('+ -'), 2, opAssoc.LEFT), - ]) - - arith_expr.runTests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', fullDump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.tryParse(instring, loc) - return loc, [] - - ret = Forward() - lastExpr = baseExpr | (lpar + ret + rpar) - for i, operDef in enumerate(opList): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4] - termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward().setName(termName) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) - + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.setParseAction(*pa) - else: - matchExpr.setParseAction(pa) - thisExpr <<= (matchExpr.setName(termName) | lastExpr) - lastExpr = thisExpr - ret <<= lastExpr - return ret - -operatorPrecedence = infixNotation -"""(Deprecated) Former name of :class:`infixNotation`, will be -dropped in a future release.""" - -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + 
'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes") -quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and - closing delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - closer - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - content - expression for items within the nested lists - (default= ``None``) - - ignoreExpr - expression for ignoring opening and closing - delimiters (default= :class:`quotedString`) - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignoreExpr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quotedString or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quotedString`, but if no expressions are to be ignored, then - pass ``None`` for this argument. - - Example:: - - data_type = oneOf("void int short long char float double") - decl_data_type = Combine(data_type + Optional(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Optional(delimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(cStyleComment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.searchString(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, basestring) and isinstance(closer, basestring): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).setParseAction(lambda t: t[0].strip())) - else: - content = (empty.copy() + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t: t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - content = 
(Combine(OneOrMore(~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.setName('nested %s%s expression' % (opener, closer)) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single - grammar should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond - the current level; set to False for block of left-most - statements (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stack = indentStack[:] - - def reset_stack(): - indentStack[:] = backup_stack - - def checkPeerIndent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if not(indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd()) - INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') - PEER = Empty().setParseAction(checkPeerIndent).setName('') - UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') - if indent: - smExpr 
= Group(Optional(NL)
-                       + INDENT
-                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
-                       + UNDENT)
-    else:
-        smExpr = Group(Optional(NL)
-                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
-                       + UNDENT)
-    smExpr.setFailAction(lambda a, b, c, d: reset_stack())
-    blockStatementExpr.ignore(_bslash + LineEnd())
-    return smExpr.setName('indented block')
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag'))
-_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\''))
-commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
-def replaceHTMLEntity(t):
-    """Helper parser action to replace common HTML entities with their special characters"""
-    return _htmlEntityMap.get(t.entity)
-
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
-"Comment of the form ``/* ... */``"
-
-htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
-"Comment of the form ``<!-- ... -->``"
-
-restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
-dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
-"Comment of the form ``// ... (to end of line)``"
-
-cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
-"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
-
-javaStyleComment = cppStyleComment
-"Same as :class:`cppStyleComment`"
-
-pythonStyleComment = Regex(r"#.*").setName("Python style comment")
-"Comment of the form ``# ... (to end of line)``"
-
-_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')
-                                  + Optional(Word(" \t")
-                                             + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
-commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")
-"""(Deprecated) Predefined expression of 1 or more printable words or
-quoted strings, separated by commas.
-
-This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
-""" - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convertToInteger = tokenMap(int) - """ - Parse action for converting parsed integers to Python int - """ - - convertToFloat = tokenMap(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).setName("integer").setParseAction(convertToInteger) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16)) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.addParseAction(lambda t: t[0]/t[-1]) - - mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.addParseAction(sum) - - real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat) - """expression that parses a floating 
point number and returns a float""" - - sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas + '_', alphanums + '_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - + "::" - + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - ).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)"
-
-    @staticmethod
-    def convertToDate(fmt="%Y-%m-%d"):
-        """
-        Helper to create a parse action for converting parsed date string to Python datetime.date
-
-        Params -
-         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
-
-        Example::
-
-            date_expr = pyparsing_common.iso8601_date.copy()
-            date_expr.setParseAction(pyparsing_common.convertToDate())
-            print(date_expr.parseString("1999-12-31"))
-
-        prints::
-
-            [datetime.date(1999, 12, 31)]
-        """
-        def cvt_fn(s, l, t):
-            try:
-                return datetime.strptime(t[0], fmt).date()
-            except ValueError as ve:
-                raise ParseException(s, l, str(ve))
-        return cvt_fn
-
-    @staticmethod
-    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
-        """Helper to create a parse action for converting parsed
-        datetime string to Python datetime.datetime
-
-        Params -
-         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
-
-        Example::
-
-            dt_expr = pyparsing_common.iso8601_datetime.copy()
-            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
-            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
-
-        prints::
-
-            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
-        """
-        def cvt_fn(s, l, t):
-            try:
-                return datetime.strptime(t[0], fmt)
-            except ValueError as ve:
-                raise ParseException(s, l, str(ve))
-        return cvt_fn
-
-    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
-    "ISO8601 date (``yyyy-mm-dd``)"
-
-    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
-    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
-
-    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
-    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
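The ``(?P<...>)`` named groups just restored in the two ISO8601 regexes above also surface as pyparsing results names on the matched tokens (the same mechanism replaceHTMLEntity relies on with ``t.entity``); a small sketch, assuming pyparsing 2.4.x::

    from pyparsing import pyparsing_common

    result = pyparsing_common.iso8601_date.parseString("1999-12-31")
    print(result[0])                                # 1999-12-31
    print(result.year, result.month, result.day)   # 1999 12 31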
-
-    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
-    @staticmethod
-    def stripHTMLTags(s, l, tokens):
-        """Parse action to remove HTML tags from web page HTML source
-
-        Example::
-
-            # strip HTML links from normal text
-            text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
-            td, td_end = makeHTMLTags("TD")
-            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
-            print(table_text.parseString(text).body)
-
-        Prints::
-
-            More info at the pyparsing wiki page
-        """
-        return pyparsing_common._html_stripper.transformString(tokens[0])
-
-    _commasepitem = Combine(OneOrMore(~Literal(",")
-                                      + ~LineEnd()
-                                      + Word(printables, excludeChars=',')
-                                      + Optional(White(" \t")))).streamline().setName("commaItem")
-    comma_separated_list = delimitedList(Optional(quotedString.copy()
-                                                  | _commasepitem, default='')
-                                         ).setName("comma separated list")
-    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
-    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
-    """Parse action to convert tokens to upper case."""
-
-    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
-    """Parse action to convert tokens to lower case."""
-
-
-class _lazyclassproperty(object):
-    def __init__(self, fn):
-        self.fn = fn
-        self.__doc__ = fn.__doc__
-        self.__name__ = fn.__name__
-
-    def __get__(self, obj, cls):
-        if cls is None:
-            cls = type(obj)
-        if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', [])
-                                              for superclass in cls.__mro__[1:]):
-            cls._intern = {}
-        attrname = self.fn.__name__
-        if attrname not in cls._intern:
-            cls._intern[attrname] = self.fn(cls)
-        return cls._intern[attrname]
-
-
-class unicode_set(object):
-    """
-    A set of Unicode characters, for language-specific strings for
-    ``alphas``, ``nums``, ``alphanums``, and ``printables``.
-    A unicode_set is defined by a list of ranges in the Unicode character
-    set, in a class attribute ``_ranges``, such as::
-
-        _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
-
-    A unicode set can also be defined using multiple inheritance of other unicode sets::
-
-        class CJK(Chinese, Japanese, Korean):
-            pass
-    """
-    _ranges = []
-
-    @classmethod
-    def _get_chars_for_ranges(cls):
-        ret = []
-        for cc in cls.__mro__:
-            if cc is unicode_set:
-                break
-            for rr in cc._ranges:
-                ret.extend(range(rr[0], rr[-1] + 1))
-        return [unichr(c) for c in sorted(set(ret))]
-
-    @_lazyclassproperty
-    def printables(cls):
-        "all non-whitespace characters in this range"
-        return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))
-
-    @_lazyclassproperty
-    def alphas(cls):
-        "all alphabetic characters in this range"
-        return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
-
-    @_lazyclassproperty
-    def nums(cls):
-        "all numeric digit characters in this range"
-        return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges()))
-
-    @_lazyclassproperty
-    def alphanums(cls):
-        "all alphanumeric characters in this range"
-        return cls.alphas + cls.nums
-
-
-class pyparsing_unicode(unicode_set):
-    """
-    A namespace class for defining common language unicode_sets.
- """ - _ranges = [(32, sys.maxunicode)] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges = [(0x0100, 0x017f),] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges = [(0x0180, 0x024f),] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges = [ - (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), - (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), - (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges = [(0x0400, 0x04ff)] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges = [(0x3040, 0x309f),] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges = [(0x30a0, 0x30ff),] - - class Korean(unicode_set): - "Unicode set for Korean Unicode Character Range" - _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),] - - class CJK(Chinese, Japanese, Korean): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - pass - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges = [(0x0590, 0x05ff),] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] - -pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges) - -# define ranges in language character sets -if PY_3: - setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic) - setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese) - setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic) - setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek) - setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew) - setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese) - setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji) - setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana) - setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana) - setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean) - setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai) - setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) - - -class pyparsing_test: - """ - 
namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example: - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] + ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckLisst(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.setDefaultWhitespaceChars( - self._save_context["default_whitespace"] - ) - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - for name, value in self._save_context["__diag__"].items(): - setattr(__diag__, name, value) - ParserElement._packratEnabled = self._save_context["packrat_enabled"] - ParserElement._parse = self._save_context["packrat_parse"] - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - return self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a ParseResults object with an optional expected_list, - and compare any defined results names with an optional expected_dict. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.asList(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.asDict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ParseResults.asList() is equal to the expected_list. 
- """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ParseResults.asDict() is equal to the expected_dict. - """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ParserElement.runTests(). If a list of - list-dict tuples is given as the expected_parse_results argument, then these are zipped - with the report tuples returned by runTests and evaluated using assertParseResultsEquals. - Finally, asserts that the overall runTests() success value is True. - - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (rpt[0], rpt[1], expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? 
- print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - -if __name__ == "__main__": - - selectToken = CaselessLiteral("select") - fromToken = CaselessLiteral("from") - - ident = Word(alphas, alphanums + "_$") - - columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - columnNameList = Group(delimitedList(columnName)).setName("columns") - columnSpec = ('*' | columnNameList) - - tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - tableNameList = Group(delimitedList(tableName)).setName("tables") - - simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") - - # demo runTests method, including embedded comments in test string - simpleSQL.runTests(""" - # '*' as column list and dotted table name - select * from SYS.XYZZY - - # caseless match on "SELECT", and casts back to "select" - SELECT * from XYZZY, ABC - - # list of column names, and mixed case SELECT keyword - Select AA,BB,CC from Sys.dual - - # multiple tables - Select A, B, C from Sys.dual, Table2 - - # invalid SELECT keyword - should fail - Xelect A, B, C from Sys.dual - - # incomplete command - should fail - Select - - # invalid column name - should fail - Select ^^^ frox Sys.dual - - """) - - pyparsing_common.number.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - # any int or real number, returned as float - pyparsing_common.fnumber.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - pyparsing_common.hex_integer.runTests(""" - 100 - FF - """) - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(""" - 12345678-1234-5678-1234-567812345678 - """) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/WHEEL deleted file mode 100644 index 3811098..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) -Root-Is-Purelib: false -Tag: cp37-cp37m-manylinux_2_17_x86_64 -Tag: cp37-cp37m-manylinux2014_x86_64 - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/jsonschema-3.2.0.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/LICENSE.mit b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/LICENSE.mit similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/LICENSE.mit rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/LICENSE.mit diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/METADATA similarity index 96% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/METADATA index e592ceb..32bd549 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/METADATA @@ -1,12 +1,12 @@ Metadata-Version: 2.1 Name: pyrsistent -Version: 0.18.1 +Version: 0.19.3 Summary: Persistent/Functional/Immutable data structures -Home-page: http://github.com/tobgu/pyrsistent/ +Home-page: https://github.com/tobgu/pyrsistent/ Author: Tobias Gustafsson Author-email: tobias.l.gustafsson@gmail.com License: MIT -Platform: UNKNOWN +Project-URL: Changelog, https://pyrsistent.readthedocs.io/en/latest/changes.html Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent @@ -14,6 +14,7 @@ Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: Implementation :: PyPy Requires-Python: >=3.7 Description-Content-Type: text/x-rst @@ -25,7 +26,8 @@ Pyrsistent :target: https://github.com/tobgu/pyrsistent/actions/workflows/tests.yaml -.. _Pyrthon: https://www.github.com/tobgu/pyrthon/ +.. _Pyrthon: https://www.github.com/tobgu/pyrthon +.. _Pyrsistent_extras: https://github.com/mingmingrr/pyrsistent-extras Pyrsistent is a number of persistent collections (by some referred to as functional data structures). Persistent in the sense that they are immutable. @@ -43,8 +45,13 @@ data structures are designed to share common elements through path copying. It aims at taking these concepts and make them as pythonic as possible so that they can be easily integrated into any python program without hassle. -If you want to go all in on persistent data structures and use literal syntax to define them in your code rather -than function calls check out Pyrthon_. +If you want use literal syntax to define them in your code rather +than function calls check out Pyrthon_. Be aware, that one is experimental, unmaintained and alpha software. + +If you cannot find the persistent data structure you're looking for here you may want to take a look at +Pyrsistent_extras_ which is maintained by @mingmingrr. If you still don't find what you're looking for please +open an issue for discussion. If we agree that functionality is missing you may want to go ahead and create +a Pull Request implement the missing functionality. Examples -------- @@ -726,6 +733,13 @@ Hugo van Kemenade https://github.com/hugovk Ben Beasley https://github.com/musicinmybrain +Noah C. Benson https://github.com/noahbenson + +dscrofts https://github.com/dscrofts + +Andy Reagan https://github.com/andyreagan + +Aaron Durant https://github.com/Aaron-Durant Contributing ------------ @@ -750,7 +764,7 @@ Release * Update README.rst with any new contributors and potential info needed. 
* Update _pyrsistent_version.py * Commit and tag with new version: `git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'` -* Push commit and tags: `git push && git push --tags` +* Push commit and tags: `git push --follow-tags` * Build new release using Github actions Project status @@ -767,5 +781,3 @@ interested in working on any of them. If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into it please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add additional maintainers to the project! - - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/RECORD similarity index 56% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/RECORD rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/RECORD index d22d18b..c73d33a 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/RECORD @@ -1,27 +1,26 @@ -_pyrsistent_version.py,sha256=2o8cZebVQq3IlZWOsEUKQhQ2rd7qwhVQIKyXuLPJU84,23 -pvectorc.cpython-37m-x86_64-linux-gnu.so,sha256=5x9BxBtF1ix_2DYLko4xuqn1lJSL0dABpkwIZAKZ6vw,178616 -pyrsistent-0.18.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pyrsistent-0.18.1.dist-info/LICENSE.mit,sha256=PqVnU8vQzIl9P0QUqQL1aUmR23wbOruCMCFjgRhfkRI,1060 -pyrsistent-0.18.1.dist-info/METADATA,sha256=gPU0vYxlMVObzxG7pBXT9zYcSCA5rmxwiAanCIKm6SI,26942 -pyrsistent-0.18.1.dist-info/RECORD,, -pyrsistent-0.18.1.dist-info/WHEEL,sha256=FGGRUrvefXOiAJfyJCZAiu4TpTMkXB_CzdEp3x2T96E,150 -pyrsistent-0.18.1.dist-info/top_level.txt,sha256=4RgY60Kwp0PJK6JLt04o336V4SZXK5QLBlfX0jZTNlw,40 +_pyrsistent_version.py,sha256=q9DiST9rd3tSvcPf5dmXUS2Qh0-2tS_-D12iObbuRb8,23 +pyrsistent-0.19.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pyrsistent-0.19.3.dist-info/LICENSE.mit,sha256=PqVnU8vQzIl9P0QUqQL1aUmR23wbOruCMCFjgRhfkRI,1060 +pyrsistent-0.19.3.dist-info/METADATA,sha256=0E4PZC-Q-A4iP7wBA-OgswrUBgtQRTpkRSs6m6RWH3E,27690 +pyrsistent-0.19.3.dist-info/RECORD,, +pyrsistent-0.19.3.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +pyrsistent-0.19.3.dist-info/top_level.txt,sha256=xUYpkuZgtli6uqL4sU6wQ9wjM6VsV1kT5FAdxNfbSNQ,31 pyrsistent/__init__.py,sha256=1cONLCDzbyDoZ_deek8Ei_iqOLI4oNseRPLF3zvmEoA,1479 pyrsistent/__init__.pyi,sha256=xrlyeqwkRQHxsSXZEoIR_PliQBSrMiEy8TpuyRpe3mY,7188 pyrsistent/_checked_types.py,sha256=bEjX28UYWdt-7vEhNjgbbMIhWZY3QL4cwhCSxCKRCsQ,18372 pyrsistent/_field_common.py,sha256=gBzLpQ0dqGI4UK7Y-ED6h20l6Dn0QkBEyrqGwbX_hWs,11963 pyrsistent/_helpers.py,sha256=vHWFBh4FaeXUZM4Pfqw1Zjz4kaPT_td3PmG-ExY3kv0,3232 -pyrsistent/_immutable.py,sha256=N5-gm7m0IMDMxt9nWidgXSBVXyniDJ0dJ1_J9_ip9UA,3534 +pyrsistent/_immutable.py,sha256=PkjvghcbB1XNXgam5H-W9uEaEpfZr1Ec2v_qzK1ED6o,3287 pyrsistent/_pbag.py,sha256=G5LgB5_eT_pOOkqLmZqGGIliVohnjBlZXzo_7C9qpqQ,6730 pyrsistent/_pclass.py,sha256=2mqz57Ojd7BpR7nJLrbMz35g414pW_ddnIEf3xPTRK4,9702 pyrsistent/_pdeque.py,sha256=Nlthy8fHSIFsoUteGVcyKao98OqZkZko7x9AxVLhxvM,12203 pyrsistent/_plist.py,sha256=dlteSIcFB4k95VFHoEwbB0YBsKwmEnY2JNeapX87hws,8293 
-pyrsistent/_pmap.py,sha256=HokGiHJfmnU4dNEKnP3jkBBmCVvRyvh9orUl4FKn_Y0,14702 +pyrsistent/_pmap.py,sha256=eLahg3r946dhQYVgewfqNpOWIaPhc6sqAZ3zvDgFIG4,18781 pyrsistent/_precord.py,sha256=c8NTcjdSDdlXSCp40noMsLK-Zx8sPAWl8dWfbMuyzdA,7032 pyrsistent/_pset.py,sha256=7EKtC1khGTOn702PX5yauuaqD1CZvLkZJRDGiqMvQ-4,5693 pyrsistent/_pvector.py,sha256=tYz4cwokH9MYu0kAwafrlx-U0IJyIvxK8rFbRde_IAg,22694 -pyrsistent/_toolz.py,sha256=_FomPNhGhHO4hyIFaOeNpcbGiXRLD9dLdHhCnPYoie0,3428 +pyrsistent/_toolz.py,sha256=13LTsrZm_gfq1H3oZqxpez-P5PQi1s32hUNXmO7NNkk,3425 pyrsistent/_transformations.py,sha256=CF9KXf366Z2hl87a09M6NAiXDpZJ4iCEfhddJbUUNXU,3800 pyrsistent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 pyrsistent/typing.py,sha256=uzxxq11LHbhAcsNPnxDqORkPjy6HImGulJJ2zFBdRL0,1767 -pyrsistent/typing.pyi,sha256=Wg8-s8yeZo-WlRc_-TjBCjWV2d2vbdrw6T2oUzD7gps,10409 +pyrsistent/typing.pyi,sha256=ayN_wbAD_egdnVyZ_QdhX-qJT-vx-AdHkKrpC6ZkeLA,10416 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/WHEEL similarity index 65% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/WHEEL index 385faab..57e3d84 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/httplib2-0.19.1.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.36.2) +Generator: bdist_wheel (0.38.4) Root-Is-Purelib: true Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/top_level.txt similarity index 77% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/top_level.txt index f246072..49b3e16 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/top_level.txt +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.19.3.dist-info/top_level.txt @@ -1,3 +1,2 @@ _pyrsistent_version -pvectorc pyrsistent diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_checked_types.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_checked_types.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_field_common.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_field_common.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_helpers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_helpers.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_immutable.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_immutable.py old mode 100755 new mode 100644 index 7c75945..d23deca --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_immutable.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_immutable.py @@ -60,14 +60,9 @@ def frozen_member_test(): return '' - verbose_string = "" - if sys.version_info < (3, 7): - # Verbose is no longer supported in Python 3.7 - verbose_string = ", verbose={verbose}".format(verbose=verbose) - quoted_members = ', '.join("'%s'" % m for m in members) template = """ -class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})): +class {class_name}(namedtuple('ImmutableBase', [{quoted_members}])): __slots__ = tuple() def __repr__(self): @@ -87,7 +82,6 @@ def set(self, **kwargs): """.format(quoted_members=quoted_members, member_set="set([%s])" % quoted_members if quoted_members else 'set()', frozen_member_test=frozen_member_test(), - verbose_string=verbose_string, class_name=name) if verbose: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pbag.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pbag.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pclass.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pclass.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pdeque.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pdeque.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_plist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_plist.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pmap.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pmap.py old mode 100755 new mode 100644 index 056d478..c6c7c7f --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pmap.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pmap.py @@ -3,6 +3,105 @@ from pyrsistent._pvector import pvector from pyrsistent._transformations import transform +class PMapView: + """View type for the persistent map/dict type `PMap`. + + Provides an equivalent of Python's built-in `dict_values` and `dict_items` + types that result from expressions such as `{}.values()` and + `{}.items()`. The equivalent for `{}.keys()` is absent because the keys are + instead represented by a `PSet` object, which can be created in `O(1)` time. + + The `PMapView` class is overloaded by the `PMapValues` and `PMapItems` + classes which handle the specific case of values and items, respectively. + + Parameters + ---------- + m : mapping + The mapping/dict-like object of which a view is to be created. This + should generally be a `PMap` object. + """ + # The public methods that use the above. + def __init__(self, m): + # Make sure this is a persistent map + if not isinstance(m, PMap): + # We can convert mapping objects into pmap objects, I guess (but why?)
+ if isinstance(m, Mapping): + m = pmap(m) + else: + raise TypeError("PMapView requires a Mapping object") + object.__setattr__(self, '_map', m) + + def __len__(self): + return len(self._map) + + def __setattr__(self, k, v): + raise TypeError("%s is immutable" % (type(self),)) + + def __reversed__(self): + raise TypeError("Persistent maps are not reversible") + +class PMapValues(PMapView): + """View type for the values of the persistent map/dict type `PMap`. + + Provides an equivalent of Python's built-in `dict_values` type that results + from expressions such as `{}.values()`. See also `PMapView`. + + Parameters + ---------- + m : mapping + The mapping/dict-like object of which a view is to be created. This + should generally be a `PMap` object. + """ + def __iter__(self): + return self._map.itervalues() + + def __contains__(self, arg): + return arg in self._map.itervalues() + + # The str and repr methods imitate the dict_view style currently. + def __str__(self): + return f"pmap_values({list(iter(self))})" + + def __repr__(self): + return f"pmap_values({list(iter(self))})" + + def __eq__(self, x): + # For whatever reason, dict_values always seems to return False for == + # (probably it's not implemented), so we mimic that. + if x is self: return True + else: return False + +class PMapItems(PMapView): + """View type for the items of the persistent map/dict type `PMap`. + + Provides an equivalent of Python's built-in `dict_items` type that results + from expressions such as `{}.items()`. See also `PMapView`. + + Parameters + ---------- + m : mapping + The mapping/dict-like object of which a view is to be created. This + should generally be a `PMap` object. + """ + def __iter__(self): + return self._map.iteritems() + + def __contains__(self, arg): + try: (k,v) = arg + except Exception: return False + return k in self._map and self._map[k] == v + + # The str and repr methods imitate the dict_view style currently. + def __str__(self): + return f"pmap_items({list(iter(self))})" + + def __repr__(self): + return f"pmap_items({list(iter(self))})" + + def __eq__(self, x): + if x is self: return True + elif not isinstance(x, type(self)): return False + else: return self._map == x._map class PMap(object): """ @@ -89,6 +188,12 @@ def __contains__(self, key): def __iter__(self): return self.iterkeys() + # If this method is not defined, then reversed(pmap) will attempt to reverse + # the map using len() and getitem, usually resulting in a mysterious + # KeyError. + def __reversed__(self): + raise TypeError("Persistent maps are not reversible") + def __getattr__(self, key): try: return self[key] @@ -115,13 +220,14 @@ def iteritems(self): yield k, v def values(self): - return pvector(self.itervalues()) + return PMapValues(self) def keys(self): - return pvector(self.iterkeys()) + from ._pset import PSet + return PSet(self) def items(self): - return pvector(self.iteritems()) + return PMapItems(self) def __len__(self): return self._size @@ -296,11 +402,9 @@ def __setitem__(self, key, val): self.set(key, val) def set(self, key, val): - if len(self._buckets_evolver) < 0.67 * self._size: - self._reallocate(2 * len(self._buckets_evolver)) - kv = (key, val) index, bucket = PMap._get_bucket(self._buckets_evolver, key) + reallocation_required = len(self._buckets_evolver) < 0.67 * self._size if bucket: for k, v in bucket: if k == key: @@ -310,17 +414,28 @@ def set(self, key, val): return self + # Only check and perform reallocation if not replacing an existing value. + # This is a performance tweak, see #247.
+ if reallocation_required: + self._reallocate() + return self.set(key, val) + new_bucket = [kv] new_bucket.extend(bucket) self._buckets_evolver[index] = new_bucket self._size += 1 else: + if reallocation_required: + self._reallocate() + return self.set(key, val) + self._buckets_evolver[index] = [kv] self._size += 1 return self - def _reallocate(self, new_size): + def _reallocate(self): + new_size = 2 * len(self._buckets_evolver) new_list = new_size * [None] buckets = self._buckets_evolver.persistent() for k, v in chain.from_iterable(x for x in buckets if x): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_precord.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_precord.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pset.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pset.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pvector.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_pvector.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_toolz.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_toolz.py old mode 100755 new mode 100644 index a7faed1..0bf2cb1 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_toolz.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_toolz.py @@ -4,7 +4,7 @@ See https://github.com/pytoolz/toolz/. -toolz is relased under BSD licence. Below is the licence text +toolz is released under BSD licence. Below is the licence text from toolz as it appeared when copying the code. -------------------------------------------------------------- @@ -72,7 +72,7 @@ def get_in(keys, coll, default=None, no_default=False): 0 >>> get_in(['y'], {}, no_default=True) Traceback (most recent call last): - ... + ... KeyError: 'y' """ try: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_transformations.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/_transformations.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/typing.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/typing.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/typing.pyi b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/typing.pyi index 0221c48..801dc70 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/typing.pyi +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent/typing.pyi @@ -68,7 +68,7 @@ class PVector(Sequence[T], Hashable): def __len__(self) -> int: ... def __mul__(self, other: PVector[T]) -> PVector[T]: ... def append(self, val: T) -> PVector[T]: ... - def delete(self, index: int, stop: Optional[int]) -> PVector[T]: ... + def delete(self, index: int, stop: Optional[int] = None) -> PVector[T]: ... def evolver(self) -> PVectorEvolver[T]: ... def extend(self, obj: Iterable[T]) -> PVector[T]: ... def tolist(self) -> List[T]: ... 
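Reviewer note: the _pmap.py hunks above replace the eagerly materialized pvector results of values(), keys() and items() with lazy views, and make reversed(pmap) fail loudly instead of via a mysterious KeyError. A minimal sketch of the new behavior, assuming the bundled pyrsistent 0.19.3 is importable:

    from pyrsistent import pmap

    m = pmap({"a": 1, "b": 2})
    vals = m.values()                # lazy PMapValues view, no pvector copy
    print(2 in vals)                 # True: membership iterates the map's values
    print(sorted(m.items()))         # [('a', 1), ('b', 2)] via the PMapItems view
    print("a" in m.keys())           # True: keys() now returns a set-like PSet
    print(m.values() == m.values())  # False: mimics dict_values equality semantics
    try:
        reversed(m)                  # __reversed__ now raises a clear TypeError
    except TypeError as exc:
        print(exc)                   # Persistent maps are not reversible

The related set() change resizes the bucket table only when an insert actually grows the map (the #247 performance tweak), so overwriting an existing key no longer pays for the reallocation check on the hot path.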
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/RECORD deleted file mode 100644 index 6cc80ec..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/RECORD +++ /dev/null @@ -1,25 +0,0 @@ -requests-2.26.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -requests-2.26.0.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 -requests-2.26.0.dist-info/METADATA,sha256=hWaDQ1HOaMvAc-KaJFZgv1-fov-CKxGSwEIsZGhiBB8,4753 -requests-2.26.0.dist-info/RECORD,, -requests-2.26.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -requests-2.26.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 -requests-2.26.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 -requests/__init__.py,sha256=h2SCZm8MM-UeAS48UuY7Lz97fEqsma6g6oVsKUi0eyw,4907 -requests/__version__.py,sha256=PZEyPTSIN_jRIAIB51wV7pw81m3qAw0InSR7OrKZUnE,441 -requests/_internal_utils.py,sha256=Zx3PnEUccyfsB-ie11nZVAW8qClJy0gx1qNME7rgT18,1096 -requests/adapters.py,sha256=WelSM1BCQXdbjEuDsBxqKDADeY8BHmxlrwbNnLN2rr4,21344 -requests/api.py,sha256=hjuoP79IAEmX6Dysrw8t032cLfwLHxbI_wM4gC5G9t0,6402 -requests/auth.py,sha256=OMoJIVKyRLy9THr91y8rxysZuclwPB-K1Xg1zBomUhQ,10207 -requests/certs.py,sha256=dOB5rV2DZ13dEhq9BUa_4hd5kAqg59e_zUZB00faYz8,453 -requests/compat.py,sha256=0fzHbLvfnFi2WR0o7XkrvXlElG0_VgQmmOgiooaXh_Y,1852 -requests/cookies.py,sha256=Y-bKX6TvW3FnYlE6Au0SXtVVWcaNdFvuAwQxw-G0iTI,18430 -requests/exceptions.py,sha256=AiGbnc4Z7wjlpETMcTwmsmYvZVep8Qmsd46_yp0h0mY,3238 -requests/help.py,sha256=ywPNssPohrch_Q_q4_JLJM1z2bP0TirHkA9QnoOF0sY,3968 -requests/hooks.py,sha256=QReGyy0bRcr5rkwCuObNakbYsc7EkiKeBwG4qHekr2Q,757 -requests/models.py,sha256=BtTPURThE_VpZZmUfDguGhZj5qUmqmY9rk9pIU3w3X4,34859 -requests/packages.py,sha256=kr9J9dYZr9Ef4JmwHbCEUgwViwcCyOUPgfXZvIL84Os,932 -requests/sessions.py,sha256=57O4ud9yRL6eLYh-dtFbqC1kO4d_EwZcCgYXEkujlfs,30168 -requests/status_codes.py,sha256=gT79Pbs_cQjBgp-fvrUgg1dn2DQO32bDj4TInjnMPSc,4188 -requests/structures.py,sha256=msAtr9mq1JxHd-JRyiILfdFlpbJwvvFuP3rfUQT_QxE,3005 -requests/utils.py,sha256=Ws5IP5Z7KdFkWNlMkk8L6M3iXaDwBt6CLwgWq8yXSF4,31382 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/WHEEL deleted file mode 100644 index 01b8fc7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.36.2) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyparsing-2.4.7.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/LICENSE similarity 
index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/LICENSE diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/METADATA similarity index 71% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/METADATA index ca8fd13..606c27d 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: requests -Version: 2.26.0 +Version: 2.28.0 Summary: Python HTTP for Humans. Home-page: https://requests.readthedocs.io Author: Kenneth Reitz @@ -10,31 +10,33 @@ Project-URL: Documentation, https://requests.readthedocs.io Project-URL: Source, https://github.com/psf/requests Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers -Classifier: Natural Language :: English Classifier: License :: OSI Approved :: Apache Software License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries +Requires-Python: >=3.7, <4 Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: charset-normalizer (~=2.0.0) +Requires-Dist: idna (<4,>=2.5) Requires-Dist: urllib3 (<1.27,>=1.21.1) Requires-Dist: certifi (>=2017.4.17) -Requires-Dist: chardet (<5,>=3.0.2) ; python_version < "3" -Requires-Dist: idna (<3,>=2.5) ; python_version < "3" -Requires-Dist: charset-normalizer (~=2.0.0) ; python_version >= "3" -Requires-Dist: idna (<4,>=2.5) ; python_version >= "3" Provides-Extra: security Provides-Extra: socks Requires-Dist: PySocks (!=1.5.7,>=1.5.6) ; extra == 'socks' -Requires-Dist: win-inet-pton ; (sys_platform == "win32" and python_version == "2.7") and extra == 'socks' Provides-Extra: use_chardet_on_py3 Requires-Dist: chardet (<5,>=3.0.2) ; extra == 'use_chardet_on_py3' @@ -44,7 +46,7 @@ Requires-Dist: chardet (<5,>=3.0.2) ; extra == 'use_chardet_on_py3' ```python >>> import requests ->>> r = requests.get('https://api.github.com/user', auth=('user', 'pass')) +>>> r = requests.get('https://httpbin.org/basic-auth/user/pass', auth=('user', 'pass')) >>> 
r.status_code 200 >>> r.headers['content-type'] @@ -52,16 +54,16 @@ Requires-Dist: chardet (<5,>=3.0.2) ; extra == 'use_chardet_on_py3' >>> r.encoding 'utf-8' >>> r.text -'{"type":"User"...' +'{"authenticated": true, ...' >>> r.json() -{'disk_usage': 368627, 'private_gists': 484, ...} +{'authenticated': True, ...} ``` Requests allows you to send HTTP/1.1 requests extremely easily. There’s no need to manually add query strings to your URLs, or to form-encode your `PUT` & `POST` data — but nowadays, just use the `json` method! -Requests is one of the most downloaded Python package today, pulling in around `14M downloads / week`— according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `500,000+` repositories. You may certainly put your trust in this code. +Requests is one of the most downloaded Python packages today, pulling in around `30M downloads / week`— according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `1,000,000+` repositories. You may certainly put your trust in this code. -[![Downloads](https://pepy.tech/badge/requests/month)](https://pepy.tech/project/requests/month) +[![Downloads](https://pepy.tech/badge/requests/month)](https://pepy.tech/project/requests) [![Supported Versions](https://img.shields.io/pypi/pyversions/requests.svg)](https://pypi.org/project/requests) [![Contributors](https://img.shields.io/github/contributors/psf/requests.svg)](https://github.com/psf/requests/graphs/contributors) @@ -73,7 +75,7 @@ Requests is available on PyPI: $ python -m pip install requests ``` -Requests officially supports Python 2.7 & 3.6+. +Requests officially supports Python 3.7+. 
## Supported Features & Best–Practices @@ -95,7 +97,7 @@ Requests is ready for the demands of building robust and reliable HTTP–speakin ## API Reference and User Guide available on [Read the Docs](https://requests.readthedocs.io) -[![Read the Docs](https://raw.githubusercontent.com/psf/requests/master/ext/ss.png)](https://requests.readthedocs.io) +[![Read the Docs](https://raw.githubusercontent.com/psf/requests/main/ext/ss.png)](https://requests.readthedocs.io) ## Cloning the repository @@ -115,6 +117,6 @@ git config --global fetch.fsck.badTimezone ignore --- -[![Kenneth Reitz](https://raw.githubusercontent.com/psf/requests/master/ext/kr.png)](https://kennethreitz.org) [![Python Software Foundation](https://raw.githubusercontent.com/psf/requests/master/ext/psf.png)](https://www.python.org/psf) +[![Kenneth Reitz](https://raw.githubusercontent.com/psf/requests/main/ext/kr.png)](https://kennethreitz.org) [![Python Software Foundation](https://raw.githubusercontent.com/psf/requests/main/ext/psf.png)](https://www.python.org/psf) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/RECORD new file mode 100644 index 0000000..29d2a2b --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/RECORD @@ -0,0 +1,25 @@ +requests-2.28.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +requests-2.28.0.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 +requests-2.28.0.dist-info/METADATA,sha256=ZQpIAH3Ya9ySK0975DlGGvc8ntUAIUgpfBpJ_0wkk9M,4642 +requests-2.28.0.dist-info/RECORD,, +requests-2.28.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +requests-2.28.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +requests-2.28.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 +requests/__init__.py,sha256=5Ts_3rYn4fyxQuSHFQ2ajz161OcIbf3LLUi5K2sfLAY,4972 +requests/__version__.py,sha256=tpEGeK6VtcSpvfPrBiRXlsMK_aqLuLk9p0ZTF2AMp5Q,440 +requests/_internal_utils.py,sha256=aSPlF4uDhtfKxEayZJJ7KkAxtormeTfpwKSBSwtmAUw,1397 +requests/adapters.py,sha256=sEnHGl4mJz4QHBT8jG6bU5aPinUtdoH3BIuAIzT-X74,21287 +requests/api.py,sha256=dyvkDd5itC9z2g0wHl_YfD1yf6YwpGWLO7__8e21nks,6377 +requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187 +requests/certs.py,sha256=Z9Sb410Anv6jUFTyss0jFFhU6xst8ctELqfy8Ev23gw,429 +requests/compat.py,sha256=yxntVOSEHGMrn7FNr_32EEam1ZNAdPRdSE13_yaHzTk,1451 +requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560 +requests/exceptions.py,sha256=DhveFBclVjTRxhRduVpO-GbMYMID2gmjdLfNEqNpI_U,3811 +requests/help.py,sha256=gPX5d_H7Xd88aDABejhqGgl9B1VFRTt5BmiYvL3PzIQ,3875 +requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 +requests/models.py,sha256=gsRZMJLoxSyG5G_X8e0E4FG8csLPhWzntKhwX-mccDI,35261 +requests/packages.py,sha256=DXgv-FJIczZITmv0vEBAhWj4W-5CGCIN_ksvgR17Dvs,957 +requests/sessions.py,sha256=KUqJcRRLovNefUs7ScOXSUVCcfSayTFWtbiJ7gOSlTI,30180 +requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235 +requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 +requests/utils.py,sha256=5_ws-bsKI9EHl7j27yi-6HFzPBKultPgd7HfPrUToWI,33228 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/REQUESTED 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/WHEEL similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/future-0.18.2.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/WHEEL diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.28.0.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__init__.py old mode 100755 new mode 100644 index 0ac7713..343e508 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__init__.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) @@ -40,8 +38,10 @@ :license: Apache 2.0, see LICENSE for more details. """ -import urllib3 import warnings + +import urllib3 + from .exceptions import RequestsDependencyWarning try: @@ -54,13 +54,14 @@ except ImportError: chardet_version = None + def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): - urllib3_version = urllib3_version.split('.') - assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. + urllib3_version = urllib3_version.split(".") + assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: - urllib3_version.append('0') + urllib3_version.append("0") # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 @@ -72,36 +73,46 @@ def check_compatibility(urllib3_version, chardet_version, charset_normalizer_ver # Check charset_normalizer for compatibility. 
if chardet_version: - major, minor, patch = chardet_version.split('.')[:3] + major, minor, patch = chardet_version.split(".")[:3] major, minor, patch = int(major), int(minor), int(patch) # chardet_version >= 3.0.2, < 5.0.0 assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0) elif charset_normalizer_version: - major, minor, patch = charset_normalizer_version.split('.')[:3] + major, minor, patch = charset_normalizer_version.split(".")[:3] major, minor, patch = int(major), int(minor), int(patch) # charset_normalizer >= 2.0.0 < 3.0.0 assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0) else: raise Exception("You need either charset_normalizer or chardet installed") + def _check_cryptography(cryptography_version): # cryptography < 1.3.4 try: - cryptography_version = list(map(int, cryptography_version.split('.'))) + cryptography_version = list(map(int, cryptography_version.split("."))) except ValueError: return if cryptography_version < [1, 3, 4]: - warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) + warning = "Old version of cryptography ({}) may cause slowdown.".format( + cryptography_version + ) warnings.warn(warning, RequestsDependencyWarning) + # Check imported dependencies for compatibility. try: - check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version) + check_compatibility( + urllib3.__version__, chardet_version, charset_normalizer_version + ) except (AssertionError, ValueError): - warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " - "version!".format(urllib3.__version__, chardet_version, charset_normalizer_version), - RequestsDependencyWarning) + warnings.warn( + "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format( + urllib3.__version__, chardet_version, charset_normalizer_version + ), + RequestsDependencyWarning, + ) # Attempt to enable urllib3's fallback for SNI support # if the standard library doesn't support SNI or the @@ -114,39 +125,56 @@ def _check_cryptography(cryptography_version): if not getattr(ssl, "HAS_SNI", False): from urllib3.contrib import pyopenssl + pyopenssl.inject_into_urllib3() # Check cryptography version from cryptography import __version__ as cryptography_version + _check_cryptography(cryptography_version) except ImportError: pass # urllib3's DependencyWarnings should be silenced. from urllib3.exceptions import DependencyWarning -warnings.simplefilter('ignore', DependencyWarning) -from .__version__ import __title__, __description__, __url__, __version__ -from .__version__ import __build__, __author__, __author_email__, __license__ -from .__version__ import __copyright__, __cake__ - -from . import utils -from . import packages -from .models import Request, Response, PreparedRequest -from .api import request, get, head, post, patch, put, delete, options -from .sessions import session, Session -from .status_codes import codes -from .exceptions import ( - RequestException, Timeout, URLRequired, - TooManyRedirects, HTTPError, ConnectionError, - FileModeWarning, ConnectTimeout, ReadTimeout -) +warnings.simplefilter("ignore", DependencyWarning) # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler +from . 
import packages, utils +from .__version__ import ( + __author__, + __author_email__, + __build__, + __cake__, + __copyright__, + __description__, + __license__, + __title__, + __url__, + __version__, +) +from .api import delete, get, head, options, patch, post, put, request +from .exceptions import ( + ConnectionError, + ConnectTimeout, + FileModeWarning, + HTTPError, + JSONDecodeError, + ReadTimeout, + RequestException, + Timeout, + TooManyRedirects, + URLRequired, +) +from .models import PreparedRequest, Request, Response +from .sessions import Session, session +from .status_codes import codes + logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. -warnings.simplefilter('default', FileModeWarning, append=True) +warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__version__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__version__.py old mode 100755 new mode 100644 index 0d7cde1..cbb5c5c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__version__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/__version__.py @@ -2,13 +2,13 @@ # |( |- |.| | | |- `-. | `-. # ' ' `-' `-`.`-' `-' `-' ' `-' -__title__ = 'requests' -__description__ = 'Python HTTP for Humans.' -__url__ = 'https://requests.readthedocs.io' -__version__ = '2.26.0' -__build__ = 0x022600 -__author__ = 'Kenneth Reitz' -__author_email__ = 'me@kennethreitz.org' -__license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2020 Kenneth Reitz' -__cake__ = u'\u2728 \U0001f370 \u2728' +__title__ = "requests" +__description__ = "Python HTTP for Humans." +__url__ = "https://requests.readthedocs.io" +__version__ = "2.28.0" +__build__ = 0x022800 +__author__ = "Kenneth Reitz" +__author_email__ = "me@kennethreitz.org" +__license__ = "Apache 2.0" +__copyright__ = "Copyright 2022 Kenneth Reitz" +__cake__ = "\u2728 \U0001f370 \u2728" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/_internal_utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/_internal_utils.py old mode 100755 new mode 100644 index 759d9a5..7dc9bc5 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/_internal_utils.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/_internal_utils.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests._internal_utils ~~~~~~~~~~~~~~ @@ -7,11 +5,22 @@ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) """ +import re + +from .compat import builtin_str + +_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") +_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") +_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") +_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") -from .compat import is_py2, builtin_str, str +HEADER_VALIDATORS = { + bytes: (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE), + str: (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR), +} -def to_native_string(string, encoding='ascii'): +def to_native_string(string, encoding="ascii"): """Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. 
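Reviewer note: the HEADER_VALIDATORS table introduced in the _internal_utils.py hunk above pairs one header-name regex and one header-value regex per string type (bytes and str), which lets callers validate headers before they reach the wire. A quick sketch of what the patterns accept and reject, with the regexes copied verbatim from the hunk:

    import re

    name_re = re.compile(r"^[^:\s][^:\r\n]*$")
    value_re = re.compile(r"^\S[^\r\n]*$|^$")

    print(bool(name_re.match("Content-Type")))          # True: ordinary header name
    print(bool(name_re.match("Bad:Name")))              # False: ':' terminates the name field
    print(bool(value_re.match("")))                     # True: empty values are allowed
    print(bool(value_re.match("x\r\nEvil: injected")))  # False: CR/LF would split the header

Rejecting CR and LF in both fields is what blocks header-injection attempts on the client side.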
@@ -19,10 +28,7 @@ def to_native_string(string, encoding='ascii'): if isinstance(string, builtin_str): out = string else: - if is_py2: - out = string.encode(encoding) - else: - out = string.decode(encoding) + out = string.decode(encoding) return out @@ -36,7 +42,7 @@ def unicode_is_ascii(u_string): """ assert isinstance(u_string, str) try: - u_string.encode('ascii') + u_string.encode("ascii") return True except UnicodeEncodeError: return False diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/adapters.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/adapters.py old mode 100755 new mode 100644 index fa4d9b3..d3b2d5b --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/adapters.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/adapters.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.adapters ~~~~~~~~~~~~~~~~~ @@ -9,57 +7,76 @@ """ import os.path -import socket +import socket # noqa: F401 +from urllib3.exceptions import ClosedPoolError, ConnectTimeoutError +from urllib3.exceptions import HTTPError as _HTTPError +from urllib3.exceptions import InvalidHeader as _InvalidHeader +from urllib3.exceptions import ( + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, +) +from urllib3.exceptions import ProxyError as _ProxyError +from urllib3.exceptions import ReadTimeoutError, ResponseError +from urllib3.exceptions import SSLError as _SSLError from urllib3.poolmanager import PoolManager, proxy_from_url from urllib3.response import HTTPResponse -from urllib3.util import parse_url from urllib3.util import Timeout as TimeoutSauce +from urllib3.util import parse_url from urllib3.util.retry import Retry -from urllib3.exceptions import ClosedPoolError -from urllib3.exceptions import ConnectTimeoutError -from urllib3.exceptions import HTTPError as _HTTPError -from urllib3.exceptions import MaxRetryError -from urllib3.exceptions import NewConnectionError -from urllib3.exceptions import ProxyError as _ProxyError -from urllib3.exceptions import ProtocolError -from urllib3.exceptions import ReadTimeoutError -from urllib3.exceptions import SSLError as _SSLError -from urllib3.exceptions import ResponseError -from urllib3.exceptions import LocationValueError +from .auth import _basic_auth_str +from .compat import basestring, urlparse +from .cookies import extract_cookies_to_jar +from .exceptions import ( + ConnectionError, + ConnectTimeout, + InvalidHeader, + InvalidProxyURL, + InvalidSchema, + InvalidURL, + ProxyError, + ReadTimeout, + RetryError, + SSLError, +) from .models import Response -from .compat import urlparse, basestring -from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, - get_encoding_from_headers, prepend_scheme_if_needed, - get_auth_from_url, urldefragauth, select_proxy) from .structures import CaseInsensitiveDict -from .cookies import extract_cookies_to_jar -from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, - ProxyError, RetryError, InvalidSchema, InvalidProxyURL, - InvalidURL) -from .auth import _basic_auth_str +from .utils import ( + DEFAULT_CA_BUNDLE_PATH, + extract_zipped_paths, + get_auth_from_url, + get_encoding_from_headers, + prepend_scheme_if_needed, + select_proxy, + urldefragauth, +) try: from urllib3.contrib.socks import SOCKSProxyManager except ImportError: + def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") + DEFAULT_POOLBLOCK = 
False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None -class BaseAdapter(object): +class BaseAdapter: """The Base Transport Adapter""" def __init__(self): - super(BaseAdapter, self).__init__() + super().__init__() - def send(self, request, stream=False, timeout=None, verify=True, - cert=None, proxies=None): + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. @@ -107,12 +124,22 @@ class HTTPAdapter(BaseAdapter): >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ - __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', - '_pool_block'] - def __init__(self, pool_connections=DEFAULT_POOLSIZE, - pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, - pool_block=DEFAULT_POOLBLOCK): + __attrs__ = [ + "max_retries", + "config", + "_pool_connections", + "_pool_maxsize", + "_pool_block", + ] + + def __init__( + self, + pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, + max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK, + ): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: @@ -120,7 +147,7 @@ def __init__(self, pool_connections=DEFAULT_POOLSIZE, self.config = {} self.proxy_manager = {} - super(HTTPAdapter, self).__init__() + super().__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize @@ -140,10 +167,13 @@ def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) - self.init_poolmanager(self._pool_connections, self._pool_maxsize, - block=self._pool_block) + self.init_poolmanager( + self._pool_connections, self._pool_maxsize, block=self._pool_block + ) - def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): + def init_poolmanager( + self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs + ): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only @@ -160,8 +190,13 @@ def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool self._pool_maxsize = maxsize self._pool_block = block - self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=True, **pool_kwargs) + self.poolmanager = PoolManager( + num_pools=connections, + maxsize=maxsize, + block=block, + strict=True, + **pool_kwargs, + ) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. 
@@ -177,7 +212,7 @@ def proxy_manager_for(self, proxy, **proxy_kwargs): """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] - elif proxy.lower().startswith('socks'): + elif proxy.lower().startswith("socks"): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, @@ -186,7 +221,7 @@ def proxy_manager_for(self, proxy, **proxy_kwargs): num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, - **proxy_kwargs + **proxy_kwargs, ) else: proxy_headers = self.proxy_headers(proxy) @@ -196,7 +231,8 @@ def proxy_manager_for(self, proxy, **proxy_kwargs): num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, - **proxy_kwargs) + **proxy_kwargs, + ) return manager @@ -212,7 +248,7 @@ def cert_verify(self, conn, url, verify, cert): to a CA bundle to use :param cert: The SSL certificate to verify. """ - if url.lower().startswith('https') and verify: + if url.lower().startswith("https") and verify: cert_loc = None @@ -224,17 +260,19 @@ def cert_verify(self, conn, url, verify, cert): cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): - raise IOError("Could not find a suitable TLS CA certificate bundle, " - "invalid path: {}".format(cert_loc)) + raise OSError( + f"Could not find a suitable TLS CA certificate bundle, " + f"invalid path: {cert_loc}" + ) - conn.cert_reqs = 'CERT_REQUIRED' + conn.cert_reqs = "CERT_REQUIRED" if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: - conn.cert_reqs = 'CERT_NONE' + conn.cert_reqs = "CERT_NONE" conn.ca_certs = None conn.ca_cert_dir = None @@ -246,11 +284,14 @@ def cert_verify(self, conn, url, verify, cert): conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): - raise IOError("Could not find the TLS certificate file, " - "invalid path: {}".format(conn.cert_file)) + raise OSError( + f"Could not find the TLS certificate file, " + f"invalid path: {conn.cert_file}" + ) if conn.key_file and not os.path.exists(conn.key_file): - raise IOError("Could not find the TLS key file, " - "invalid path: {}".format(conn.key_file)) + raise OSError( + f"Could not find the TLS key file, invalid path: {conn.key_file}" + ) def build_response(self, req, resp): """Builds a :class:`Response ` object from a urllib3 @@ -265,10 +306,10 @@ def build_response(self, req, resp): response = Response() # Fallback to None if there's no status_code, for whatever reason. - response.status_code = getattr(resp, 'status', None) + response.status_code = getattr(resp, "status", None) # Make headers case-insensitive. - response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) + response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) @@ -276,7 +317,7 @@ def build_response(self, req, resp): response.reason = response.raw.reason if isinstance(req.url, bytes): - response.url = req.url.decode('utf-8') + response.url = req.url.decode("utf-8") else: response.url = req.url @@ -301,11 +342,13 @@ def get_connection(self, url, proxies=None): proxy = select_proxy(url, proxies) if proxy: - proxy = prepend_scheme_if_needed(proxy, 'http') + proxy = prepend_scheme_if_needed(proxy, "http") proxy_url = parse_url(proxy) if not proxy_url.host: - raise InvalidProxyURL("Please check proxy URL. 
It is malformed" - " and could be missing the host.") + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." + ) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: @@ -343,11 +386,11 @@ def request_url(self, request, proxies): proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme - is_proxied_http_request = (proxy and scheme != 'https') + is_proxied_http_request = proxy and scheme != "https" using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() - using_socks_proxy = proxy_scheme.startswith('socks') + using_socks_proxy = proxy_scheme.startswith("socks") url = request.path_url if is_proxied_http_request and not using_socks_proxy: @@ -386,12 +429,13 @@ def proxy_headers(self, proxy): username, password = get_auth_from_url(proxy) if username: - headers['Proxy-Authorization'] = _basic_auth_str(username, - password) + headers["Proxy-Authorization"] = _basic_auth_str(username, password) return headers - def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. @@ -415,20 +459,26 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) - self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) + self.add_headers( + request, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + ) - chunked = not (request.body is None or 'Content-Length' in request.headers) + chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) - except ValueError as e: - # this may raise a string formatting error. - err = ("Invalid timeout {}. Pass a (connect, read) " - "timeout tuple, or a single float to set " - "both timeouts to the same value".format(timeout)) - raise ValueError(err) + except ValueError: + raise ValueError( + f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " + f"or a single float to set both timeouts to the same value." + ) elif isinstance(timeout, TimeoutSauce): pass else: @@ -446,20 +496,24 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox preload_content=False, decode_content=False, retries=self.max_retries, - timeout=timeout + timeout=timeout, ) # Send the request. 
else: - if hasattr(conn, 'proxy_pool'): + if hasattr(conn, "proxy_pool"): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: - low_conn.putrequest(request.method, - url, - skip_accept_encoding=True) + skip_host = "Host" in request.headers + low_conn.putrequest( + request.method, + url, + skip_accept_encoding=True, + skip_host=skip_host, + ) for header, value in request.headers.items(): low_conn.putheader(header, value) @@ -467,34 +521,29 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox low_conn.endheaders() for i in request.body: - low_conn.send(hex(len(i))[2:].encode('utf-8')) - low_conn.send(b'\r\n') + low_conn.send(hex(len(i))[2:].encode("utf-8")) + low_conn.send(b"\r\n") low_conn.send(i) - low_conn.send(b'\r\n') - low_conn.send(b'0\r\n\r\n') + low_conn.send(b"\r\n") + low_conn.send(b"0\r\n\r\n") # Receive the response from the server - try: - # For Python 2.7, use buffering of HTTP responses - r = low_conn.getresponse(buffering=True) - except TypeError: - # For compatibility with Python 3.3+ - r = low_conn.getresponse() + r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, - decode_content=False + decode_content=False, ) - except: + except Exception: # If we hit any problems here, clean up the connection. - # Then, reraise so that we can handle the actual exception. + # Then, raise so that we can handle the actual exception. low_conn.close() raise - except (ProtocolError, socket.error) as err: + except (ProtocolError, OSError) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: @@ -527,6 +576,8 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) + elif isinstance(e, _InvalidHeader): + raise InvalidHeader(e, request=request) else: raise diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/api.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/api.py old mode 100755 new mode 100644 index 4cba90e..2f71aae --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/api.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/api.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.api ~~~~~~~~~~~~ @@ -72,7 +70,7 @@ def get(url, params=None, **kwargs): :rtype: requests.Response """ - return request('get', url, params=params, **kwargs) + return request("get", url, params=params, **kwargs) def options(url, **kwargs): @@ -84,7 +82,7 @@ def options(url, **kwargs): :rtype: requests.Response """ - return request('options', url, **kwargs) + return request("options", url, **kwargs) def head(url, **kwargs): @@ -98,8 +96,8 @@ def head(url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', False) - return request('head', url, **kwargs) + kwargs.setdefault("allow_redirects", False) + return request("head", url, **kwargs) def post(url, data=None, json=None, **kwargs): @@ -114,7 +112,7 @@ def post(url, data=None, json=None, **kwargs): :rtype: requests.Response """ - return request('post', url, data=data, json=json, **kwargs) + return request("post", url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): @@ -129,7 +127,7 @@ def put(url, data=None, **kwargs): :rtype: requests.Response """ - return request('put', url, data=data, **kwargs) + return 
request("put", url, data=data, **kwargs) def patch(url, data=None, **kwargs): @@ -144,7 +142,7 @@ def patch(url, data=None, **kwargs): :rtype: requests.Response """ - return request('patch', url, data=data, **kwargs) + return request("patch", url, data=data, **kwargs) def delete(url, **kwargs): @@ -156,4 +154,4 @@ def delete(url, **kwargs): :rtype: requests.Response """ - return request('delete', url, **kwargs) + return request("delete", url, **kwargs) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/auth.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/auth.py old mode 100755 new mode 100644 index eeface3..9733686 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/auth.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/auth.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.auth ~~~~~~~~~~~~~ @@ -7,22 +5,21 @@ This module contains the authentication handlers for Requests. """ +import hashlib import os import re -import time -import hashlib import threading +import time import warnings - from base64 import b64encode -from .compat import urlparse, str, basestring -from .cookies import extract_cookies_to_jar from ._internal_utils import to_native_string +from .compat import basestring, str, urlparse +from .cookies import extract_cookies_to_jar from .utils import parse_dict_header -CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' -CONTENT_TYPE_MULTI_PART = 'multipart/form-data' +CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +CONTENT_TYPE_MULTI_PART = "multipart/form-data" def _basic_auth_str(username, password): @@ -57,23 +54,23 @@ def _basic_auth_str(username, password): # -- End Removal -- if isinstance(username, str): - username = username.encode('latin1') + username = username.encode("latin1") if isinstance(password, str): - password = password.encode('latin1') + password = password.encode("latin1") - authstr = 'Basic ' + to_native_string( - b64encode(b':'.join((username, password))).strip() + authstr = "Basic " + to_native_string( + b64encode(b":".join((username, password))).strip() ) return authstr -class AuthBase(object): +class AuthBase: """Base class that all auth implementations derive from""" def __call__(self, r): - raise NotImplementedError('Auth hooks must be callable.') + raise NotImplementedError("Auth hooks must be callable.") class HTTPBasicAuth(AuthBase): @@ -84,16 +81,18 @@ def __init__(self, username, password): self.password = password def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) def __ne__(self, other): return not self == other def __call__(self, r): - r.headers['Authorization'] = _basic_auth_str(self.username, self.password) + r.headers["Authorization"] = _basic_auth_str(self.username, self.password) return r @@ -101,7 +100,7 @@ class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): - r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) + r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) return r @@ -116,9 +115,9 @@ def __init__(self, username, password): def init_per_thread_state(self): # Ensure state is initialized just once per-thread - 
if not hasattr(self._thread_local, 'init'): + if not hasattr(self._thread_local, "init"): self._thread_local.init = True - self._thread_local.last_nonce = '' + self._thread_local.last_nonce = "" self._thread_local.nonce_count = 0 self._thread_local.chal = {} self._thread_local.pos = None @@ -129,44 +128,52 @@ def build_digest_header(self, method, url): :rtype: str """ - realm = self._thread_local.chal['realm'] - nonce = self._thread_local.chal['nonce'] - qop = self._thread_local.chal.get('qop') - algorithm = self._thread_local.chal.get('algorithm') - opaque = self._thread_local.chal.get('opaque') + realm = self._thread_local.chal["realm"] + nonce = self._thread_local.chal["nonce"] + qop = self._thread_local.chal.get("qop") + algorithm = self._thread_local.chal.get("algorithm") + opaque = self._thread_local.chal.get("opaque") hash_utf8 = None if algorithm is None: - _algorithm = 'MD5' + _algorithm = "MD5" else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level - if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': + if _algorithm == "MD5" or _algorithm == "MD5-SESS": + def md5_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.md5(x).hexdigest() + hash_utf8 = md5_utf8 - elif _algorithm == 'SHA': + elif _algorithm == "SHA": + def sha_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.sha1(x).hexdigest() + hash_utf8 = sha_utf8 - elif _algorithm == 'SHA-256': + elif _algorithm == "SHA-256": + def sha256_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.sha256(x).hexdigest() + hash_utf8 = sha256_utf8 - elif _algorithm == 'SHA-512': + elif _algorithm == "SHA-512": + def sha512_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.sha512(x).hexdigest() + hash_utf8 = sha512_utf8 - KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) + KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 if hash_utf8 is None: return None @@ -177,10 +184,10 @@ def sha512_utf8(x): #: path is request-uri defined in RFC 2616 which should not be empty path = p_parsed.path or "/" if p_parsed.query: - path += '?' 
+ p_parsed.query + path += f"?{p_parsed.query}" - A1 = '%s:%s:%s' % (self.username, realm, self.password) - A2 = '%s:%s' % (method, path) + A1 = f"{self.username}:{realm}:{self.password}" + A2 = f"{method}:{path}" HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) @@ -189,22 +196,20 @@ def sha512_utf8(x): self._thread_local.nonce_count += 1 else: self._thread_local.nonce_count = 1 - ncvalue = '%08x' % self._thread_local.nonce_count - s = str(self._thread_local.nonce_count).encode('utf-8') - s += nonce.encode('utf-8') - s += time.ctime().encode('utf-8') + ncvalue = f"{self._thread_local.nonce_count:08x}" + s = str(self._thread_local.nonce_count).encode("utf-8") + s += nonce.encode("utf-8") + s += time.ctime().encode("utf-8") s += os.urandom(8) - cnonce = (hashlib.sha1(s).hexdigest()[:16]) - if _algorithm == 'MD5-SESS': - HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) + cnonce = hashlib.sha1(s).hexdigest()[:16] + if _algorithm == "MD5-SESS": + HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") if not qop: - respdig = KD(HA1, "%s:%s" % (nonce, HA2)) - elif qop == 'auth' or 'auth' in qop.split(','): - noncebit = "%s:%s:%s:%s:%s" % ( - nonce, ncvalue, cnonce, 'auth', HA2 - ) + respdig = KD(HA1, f"{nonce}:{HA2}") + elif qop == "auth" or "auth" in qop.split(","): + noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" respdig = KD(HA1, noncebit) else: # XXX handle auth-int. @@ -213,18 +218,20 @@ def sha512_utf8(x): self._thread_local.last_nonce = nonce # XXX should the partial digests be encoded too? - base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (self.username, realm, nonce, path, respdig) + base = ( + f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' + f'uri="{path}", response="{respdig}"' + ) if opaque: - base += ', opaque="%s"' % opaque + base += f', opaque="{opaque}"' if algorithm: - base += ', algorithm="%s"' % algorithm + base += f', algorithm="{algorithm}"' if entdig: - base += ', digest="%s"' % entdig + base += f', digest="{entdig}"' if qop: - base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) + base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' - return 'Digest %s' % (base) + return f"Digest {base}" def handle_redirect(self, r, **kwargs): """Reset num_401_calls counter on redirects.""" @@ -248,13 +255,13 @@ def handle_401(self, r, **kwargs): # Rewind the file position indicator of the body to where # it was to resend the request. r.request.body.seek(self._thread_local.pos) - s_auth = r.headers.get('www-authenticate', '') + s_auth = r.headers.get("www-authenticate", "") - if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: + if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 - pat = re.compile(r'digest ', flags=re.IGNORECASE) - self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) + pat = re.compile(r"digest ", flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. 
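[Editor's aside, between hunks — not part of the patch] The reworked HTTPDigestAuth above is exercised transparently by requests: handle_401() parses the server's Digest challenge and build_digest_header() computes the Authorization header on the retry. A minimal sketch of that flow, assuming a hypothetical endpoint and credentials (any server issuing a Digest challenge behaves the same way):

```python
import requests
from requests.auth import HTTPDigestAuth

# The first round-trip returns 401 + WWW-Authenticate; requests retries
# once with the computed Digest header, so the caller only sees the result.
resp = requests.get(
    "https://httpbin.org/digest-auth/auth/user/passwd",  # illustrative URL
    auth=HTTPDigestAuth("user", "passwd"),
)
print(resp.status_code)  # 200 once the digest handshake succeeds
```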
@@ -264,8 +271,9 @@ def handle_401(self, r, **kwargs):
             extract_cookies_to_jar(prep._cookies, r.request, r.raw)
             prep.prepare_cookies(prep._cookies)

-            prep.headers['Authorization'] = self.build_digest_header(
-                prep.method, prep.url)
+            prep.headers["Authorization"] = self.build_digest_header(
+                prep.method, prep.url
+            )
             _r = r.connection.send(prep, **kwargs)
             _r.history.append(r)
             _r.request = prep
@@ -280,7 +288,7 @@ def __call__(self, r):
         self.init_per_thread_state()
         # If we have a saved nonce, skip the 401
         if self._thread_local.last_nonce:
-            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
+            r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
         try:
             self._thread_local.pos = r.body.tell()
         except AttributeError:
@@ -289,17 +297,19 @@ def __call__(self, r):
             # file position of the previous body. Ensure it's set to
             # None.
             self._thread_local.pos = None
-        r.register_hook('response', self.handle_401)
-        r.register_hook('response', self.handle_redirect)
+        r.register_hook("response", self.handle_401)
+        r.register_hook("response", self.handle_redirect)
         self._thread_local.num_401_calls = 1

         return r

     def __eq__(self, other):
-        return all([
-            self.username == getattr(other, 'username', None),
-            self.password == getattr(other, 'password', None)
-        ])
+        return all(
+            [
+                self.username == getattr(other, "username", None),
+                self.password == getattr(other, "password", None),
+            ]
+        )

     def __ne__(self, other):
         return not self == other
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/certs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/certs.py
old mode 100755
new mode 100644
index d1a378d..be422c3
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/certs.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/certs.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-

 """
 requests.certs
@@ -14,5 +13,5 @@
 """
 from certifi import where

-if __name__ == '__main__':
+if __name__ == "__main__":
     print(where())
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/compat.py
old mode 100755
new mode 100644
index 0b14f50..6776163
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/compat.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/compat.py
@@ -1,11 +1,10 @@
-# -*- coding: utf-8 -*-
-
 """
 requests.compat
 ~~~~~~~~~~~~~~~

-This module handles import compatibility issues between Python 2 and
-Python 3.
+This module previously handled import compatibility issues
+between Python 2 and Python 3. It remains for backwards
+compatibility until the next major version.
 """

 try:
@@ -23,53 +22,58 @@
 _ver = sys.version_info

 #: Python 2.x?
-is_py2 = (_ver[0] == 2)
+is_py2 = _ver[0] == 2

 #: Python 3.x?
-is_py3 = (_ver[0] == 3)
+is_py3 = _ver[0] == 3

+# json/simplejson module import resolution
+has_simplejson = False
 try:
     import simplejson as json
+
+    has_simplejson = True
 except ImportError:
     import json

-# ---------
-# Specifics
-# ---------
-
-if is_py2:
-    from urllib import (
-        quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
-        proxy_bypass, proxy_bypass_environment, getproxies_environment)
-    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
-    from urllib2 import parse_http_list
-    import cookielib
-    from Cookie import Morsel
-    from StringIO import StringIO
-    # Keep OrderedDict for backwards compatibility.
-    from collections import Callable, Mapping, MutableMapping, OrderedDict
-
+if has_simplejson:
+    from simplejson import JSONDecodeError
+else:
+    from json import JSONDecodeError

-    builtin_str = str
-    bytes = str
-    str = unicode
-    basestring = basestring
-    numeric_types = (int, long, float)
-    integer_types = (int, long)
+# Keep OrderedDict for backwards compatibility.
+from collections import OrderedDict
+from collections.abc import Callable, Mapping, MutableMapping
+from http import cookiejar as cookielib
+from http.cookies import Morsel
+from io import StringIO

-elif is_py3:
-    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
-    from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment
-    from http import cookiejar as cookielib
-    from http.cookies import Morsel
-    from io import StringIO
-    # Keep OrderedDict for backwards compatibility.
-    from collections import OrderedDict
-    from collections.abc import Callable, Mapping, MutableMapping
+# --------------
+# Legacy Imports
+# --------------
+from urllib.parse import (
+    quote,
+    quote_plus,
+    unquote,
+    unquote_plus,
+    urldefrag,
+    urlencode,
+    urljoin,
+    urlparse,
+    urlsplit,
+    urlunparse,
+)
+from urllib.request import (
+    getproxies,
+    getproxies_environment,
+    parse_http_list,
+    proxy_bypass,
+    proxy_bypass_environment,
+)

-    builtin_str = str
-    str = str
-    bytes = bytes
-    basestring = (str, bytes)
-    numeric_types = (int, float)
-    integer_types = (int,)
+builtin_str = str
+str = str
+bytes = bytes
+basestring = (str, bytes)
+numeric_types = (int, float)
+integer_types = (int,)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/cookies.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/cookies.py
old mode 100755
new mode 100644
index 56fccd9..bf54ab2
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/cookies.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/cookies.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 """
 requests.cookies
 ~~~~~~~~~~~~~~~~
@@ -9,12 +7,12 @@
 requests.utils imports from here, so be careful with imports.
 """

+import calendar
 import copy
 import time
-import calendar

 from ._internal_utils import to_native_string
-from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
+from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse

 try:
     import threading
@@ -22,7 +20,7 @@
     import dummy_threading as threading


-class MockRequest(object):
+class MockRequest:
     """Wraps a `requests.Request` to mimic a `urllib2.Request`.

     The code in `cookielib.CookieJar` expects this interface in order to correctly
@@ -51,16 +49,22 @@ def get_origin_req_host(self):

     def get_full_url(self):
         # Only return the response's URL if the user hadn't set the Host
         # header
-        if not self._r.headers.get('Host'):
+        if not self._r.headers.get("Host"):
             return self._r.url
         # If they did set it, retrieve it and reconstruct the expected domain
-        host = to_native_string(self._r.headers['Host'], encoding='utf-8')
+        host = to_native_string(self._r.headers["Host"], encoding="utf-8")
         parsed = urlparse(self._r.url)
         # Reconstruct the URL as we expect it
-        return urlunparse([
-            parsed.scheme, host, parsed.path, parsed.params, parsed.query,
-            parsed.fragment
-        ])
+        return urlunparse(
+            [
+                parsed.scheme,
+                host,
+                parsed.path,
+                parsed.params,
+                parsed.query,
+                parsed.fragment,
+            ]
+        )

     def is_unverifiable(self):
         return True
@@ -73,7 +77,9 @@ def get_header(self, name, default=None):

     def add_header(self, key, val):
         """cookielib has no legitimate use for this method; add it back if you find one."""
-        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
+        raise NotImplementedError(
+            "Cookie headers should be added with add_unredirected_header()"
+        )

     def add_unredirected_header(self, name, value):
         self._new_headers[name] = value
@@ -94,7 +100,7 @@ def host(self):
         return self.get_host()


-class MockResponse(object):
+class MockResponse:
     """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

     ...what? Basically, expose the parsed HTTP headers from the server response
@@ -122,8 +128,7 @@ def extract_cookies_to_jar(jar, request, response):
     :param request: our own requests.Request object
     :param response: urllib3.HTTPResponse object
     """
-    if not (hasattr(response, '_original_response') and
-            response._original_response):
+    if not (hasattr(response, "_original_response") and response._original_response):
         return
     # the _original_response field is the wrapped httplib.HTTPResponse object,
     req = MockRequest(request)
@@ -140,7 +145,7 @@ def get_cookie_header(jar, request):
     """
     r = MockRequest(request)
    jar.add_cookie_header(r)
-    return r.get_new_headers().get('Cookie')
+    return r.get_new_headers().get("Cookie")


 def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
@@ -205,7 +210,9 @@ def set(self, name, value, **kwargs):
         """
         # support client code that unsets cookies by assignment of a None value:
         if value is None:
-            remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
+            remove_cookie_by_name(
+                self, name, domain=kwargs.get("domain"), path=kwargs.get("path")
+            )
             return

         if isinstance(value, Morsel):
@@ -305,16 +312,15 @@ def get_dict(self, domain=None, path=None):
         """
         dictionary = {}
         for cookie in iter(self):
-            if (
-                (domain is None or cookie.domain == domain) and
-                (path is None or cookie.path == path)
+            if (domain is None or cookie.domain == domain) and (
+                path is None or cookie.path == path
             ):
                 dictionary[cookie.name] = cookie.value
         return dictionary

     def __contains__(self, name):
         try:
-            return super(RequestsCookieJar, self).__contains__(name)
+            return super().__contains__(name)
         except CookieConflictError:
             return True

@@ -341,9 +347,13 @@ def __delitem__(self, name):
         remove_cookie_by_name(self, name)

     def set_cookie(self, cookie, *args, **kwargs):
-        if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
-            cookie.value = cookie.value.replace('\\"', '')
-        return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
+        if (
+            hasattr(cookie.value, "startswith")
+            and cookie.value.startswith('"')
+            and cookie.value.endswith('"')
+        ):
+            cookie.value = cookie.value.replace('\\"', "")
+        return super().set_cookie(cookie, *args, **kwargs)

     def update(self, other):
         """Updates this jar with cookies from another CookieJar or dict-like"""
@@ -351,7 +361,7 @@ def update(self, other):
             for cookie in other:
                 self.set_cookie(copy.copy(cookie))
         else:
-            super(RequestsCookieJar, self).update(other)
+            super().update(other)

     def _find(self, name, domain=None, path=None):
         """Requests uses this method internally to get cookie values.
@@ -371,7 +381,7 @@ def _find(self, name, domain=None, path=None):
                     if path is None or cookie.path == path:
                         return cookie.value

-        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+        raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")

     def _find_no_duplicates(self, name, domain=None, path=None):
         """Both ``__get_item__`` and ``get`` call this function: it's never
@@ -390,25 +400,29 @@ def _find_no_duplicates(self, name, domain=None, path=None):
             if cookie.name == name:
                 if domain is None or cookie.domain == domain:
                     if path is None or cookie.path == path:
-                        if toReturn is not None:  # if there are multiple cookies that meet passed in criteria
-                            raise CookieConflictError('There are multiple cookies with name, %r' % (name))
-                        toReturn = cookie.value  # we will eventually return this as long as no cookie conflict
+                        if toReturn is not None:
+                            # if there are multiple cookies that meet passed in criteria
+                            raise CookieConflictError(
+                                f"There are multiple cookies with name, {name!r}"
+                            )
+                        # we will eventually return this as long as no cookie conflict
+                        toReturn = cookie.value

         if toReturn:
             return toReturn
-        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+        raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")

     def __getstate__(self):
         """Unlike a normal CookieJar, this class is pickleable."""
         state = self.__dict__.copy()
         # remove the unpickleable RLock object
-        state.pop('_cookies_lock')
+        state.pop("_cookies_lock")
         return state

     def __setstate__(self, state):
         """Unlike a normal CookieJar, this class is pickleable."""
         self.__dict__.update(state)
-        if '_cookies_lock' not in self.__dict__:
+        if "_cookies_lock" not in self.__dict__:
             self._cookies_lock = threading.RLock()

     def copy(self):
@@ -427,7 +441,7 @@ def _copy_cookie_jar(jar):
     if jar is None:
         return None

-    if hasattr(jar, 'copy'):
+    if hasattr(jar, "copy"):
         # We're dealing with an instance of RequestsCookieJar
         return jar.copy()
     # We're dealing with a generic CookieJar instance
@@ -445,31 +459,32 @@ def create_cookie(name, value, **kwargs):
     and sent on every request (this is sometimes called a "supercookie").
""" result = { - 'version': 0, - 'name': name, - 'value': value, - 'port': None, - 'domain': '', - 'path': '/', - 'secure': False, - 'expires': None, - 'discard': True, - 'comment': None, - 'comment_url': None, - 'rest': {'HttpOnly': None}, - 'rfc2109': False, + "version": 0, + "name": name, + "value": value, + "port": None, + "domain": "", + "path": "/", + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, } badargs = set(kwargs) - set(result) if badargs: - err = 'create_cookie() got unexpected keyword arguments: %s' - raise TypeError(err % list(badargs)) + raise TypeError( + f"create_cookie() got unexpected keyword arguments: {list(badargs)}" + ) result.update(kwargs) - result['port_specified'] = bool(result['port']) - result['domain_specified'] = bool(result['domain']) - result['domain_initial_dot'] = result['domain'].startswith('.') - result['path_specified'] = bool(result['path']) + result["port_specified"] = bool(result["port"]) + result["domain_specified"] = bool(result["domain"]) + result["domain_initial_dot"] = result["domain"].startswith(".") + result["path_specified"] = bool(result["path"]) return cookielib.Cookie(**result) @@ -478,30 +493,28 @@ def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None - if morsel['max-age']: + if morsel["max-age"]: try: - expires = int(time.time() + int(morsel['max-age'])) + expires = int(time.time() + int(morsel["max-age"])) except ValueError: - raise TypeError('max-age: %s must be integer' % morsel['max-age']) - elif morsel['expires']: - time_template = '%a, %d-%b-%Y %H:%M:%S GMT' - expires = calendar.timegm( - time.strptime(morsel['expires'], time_template) - ) + raise TypeError(f"max-age: {morsel['max-age']} must be integer") + elif morsel["expires"]: + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) return create_cookie( - comment=morsel['comment'], - comment_url=bool(morsel['comment']), + comment=morsel["comment"], + comment_url=bool(morsel["comment"]), discard=False, - domain=morsel['domain'], + domain=morsel["domain"], expires=expires, name=morsel.key, - path=morsel['path'], + path=morsel["path"], port=None, - rest={'HttpOnly': morsel['httponly']}, + rest={"HttpOnly": morsel["httponly"]}, rfc2109=False, - secure=bool(morsel['secure']), + secure=bool(morsel["secure"]), value=morsel.value, - version=morsel['version'] or 0, + version=morsel["version"] or 0, ) @@ -534,11 +547,10 @@ def merge_cookies(cookiejar, cookies): :rtype: CookieJar """ if not isinstance(cookiejar, cookielib.CookieJar): - raise ValueError('You can only merge into CookieJar') + raise ValueError("You can only merge into CookieJar") if isinstance(cookies, dict): - cookiejar = cookiejar_from_dict( - cookies, cookiejar=cookiejar, overwrite=False) + cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/exceptions.py old mode 100755 new mode 100644 index c412ec9..e1cedf8 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/exceptions.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/exceptions.py @@ -1,5 +1,3 @@ -# -*- 
coding: utf-8 -*- - """ requests.exceptions ~~~~~~~~~~~~~~~~~~~ @@ -8,6 +6,8 @@ """ from urllib3.exceptions import HTTPError as BaseHTTPError +from .compat import JSONDecodeError as CompatJSONDecodeError + class RequestException(IOError): """There was an ambiguous exception that occurred while handling your @@ -16,19 +16,32 @@ class RequestException(IOError): def __init__(self, *args, **kwargs): """Initialize RequestException with `request` and `response` objects.""" - response = kwargs.pop('response', None) + response = kwargs.pop("response", None) self.response = response - self.request = kwargs.pop('request', None) - if (response is not None and not self.request and - hasattr(response, 'request')): + self.request = kwargs.pop("request", None) + if response is not None and not self.request and hasattr(response, "request"): self.request = self.response.request - super(RequestException, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class InvalidJSONError(RequestException): """A JSON error occurred.""" +class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): + """Couldn't decode the text into json""" + + def __init__(self, *args, **kwargs): + """ + Construct the JSONDecodeError instance first with all + args. Then use it's args to construct the IOError so that + the json specific args aren't used as IOError specific args + and the error message from JSONDecodeError is preserved. + """ + CompatJSONDecodeError.__init__(self, *args) + InvalidJSONError.__init__(self, *self.args, **kwargs) + + class HTTPError(RequestException): """An HTTP error occurred.""" @@ -74,11 +87,11 @@ class TooManyRedirects(RequestException): class MissingSchema(RequestException, ValueError): - """The URL schema (e.g. http or https) is missing.""" + """The URL scheme (e.g. http or https) is missing.""" class InvalidSchema(RequestException, ValueError): - """See defaults.py for valid schemas.""" + """The URL scheme provided is either invalid or unsupported.""" class InvalidURL(RequestException, ValueError): @@ -112,6 +125,7 @@ class RetryError(RequestException): class UnrewindableBodyError(RequestException): """Requests encountered an error when trying to rewind a body.""" + # Warnings diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/help.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/help.py old mode 100755 new mode 100644 index 4cd6389..8fbcd65 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/help.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/help.py @@ -1,10 +1,9 @@ """Module containing bug report helper(s).""" -from __future__ import print_function import json import platform -import sys import ssl +import sys import idna import urllib3 @@ -28,16 +27,16 @@ OpenSSL = None cryptography = None else: - import OpenSSL import cryptography + import OpenSSL def _implementation(): """Return a dict with the Python implementation and version. Provide both the name and the version of the Python implementation - currently running. For example, on CPython 2.7.5 it will return - {'name': 'CPython', 'version': '2.7.5'}. + currently running. For example, on CPython 3.10.3 it will return + {'name': 'CPython', 'version': '3.10.3'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. 
Future investigation should be done @@ -45,83 +44,83 @@ def _implementation(): """ implementation = platform.python_implementation() - if implementation == 'CPython': + if implementation == "CPython": implementation_version = platform.python_version() - elif implementation == 'PyPy': - implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, - sys.pypy_version_info.minor, - sys.pypy_version_info.micro) - if sys.pypy_version_info.releaselevel != 'final': - implementation_version = ''.join([ - implementation_version, sys.pypy_version_info.releaselevel - ]) - elif implementation == 'Jython': + elif implementation == "PyPy": + implementation_version = "{}.{}.{}".format( + sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro, + ) + if sys.pypy_version_info.releaselevel != "final": + implementation_version = "".join( + [implementation_version, sys.pypy_version_info.releaselevel] + ) + elif implementation == "Jython": implementation_version = platform.python_version() # Complete Guess - elif implementation == 'IronPython': + elif implementation == "IronPython": implementation_version = platform.python_version() # Complete Guess else: - implementation_version = 'Unknown' + implementation_version = "Unknown" - return {'name': implementation, 'version': implementation_version} + return {"name": implementation, "version": implementation_version} def info(): """Generate information for a bug report.""" try: platform_info = { - 'system': platform.system(), - 'release': platform.release(), + "system": platform.system(), + "release": platform.release(), } - except IOError: + except OSError: platform_info = { - 'system': 'Unknown', - 'release': 'Unknown', + "system": "Unknown", + "release": "Unknown", } implementation_info = _implementation() - urllib3_info = {'version': urllib3.__version__} - charset_normalizer_info = {'version': None} - chardet_info = {'version': None} + urllib3_info = {"version": urllib3.__version__} + charset_normalizer_info = {"version": None} + chardet_info = {"version": None} if charset_normalizer: - charset_normalizer_info = {'version': charset_normalizer.__version__} + charset_normalizer_info = {"version": charset_normalizer.__version__} if chardet: - chardet_info = {'version': chardet.__version__} + chardet_info = {"version": chardet.__version__} pyopenssl_info = { - 'version': None, - 'openssl_version': '', + "version": None, + "openssl_version": "", } if OpenSSL: pyopenssl_info = { - 'version': OpenSSL.__version__, - 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, + "version": OpenSSL.__version__, + "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", } cryptography_info = { - 'version': getattr(cryptography, '__version__', ''), + "version": getattr(cryptography, "__version__", ""), } idna_info = { - 'version': getattr(idna, '__version__', ''), + "version": getattr(idna, "__version__", ""), } system_ssl = ssl.OPENSSL_VERSION_NUMBER - system_ssl_info = { - 'version': '%x' % system_ssl if system_ssl is not None else '' - } + system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} return { - 'platform': platform_info, - 'implementation': implementation_info, - 'system_ssl': system_ssl_info, - 'using_pyopenssl': pyopenssl is not None, - 'using_charset_normalizer': chardet is None, - 'pyOpenSSL': pyopenssl_info, - 'urllib3': urllib3_info, - 'chardet': chardet_info, - 'charset_normalizer': charset_normalizer_info, - 'cryptography': cryptography_info, - 'idna': idna_info, - 'requests': { 
-            'version': requests_version,
+        "platform": platform_info,
+        "implementation": implementation_info,
+        "system_ssl": system_ssl_info,
+        "using_pyopenssl": pyopenssl is not None,
+        "using_charset_normalizer": chardet is None,
+        "pyOpenSSL": pyopenssl_info,
+        "urllib3": urllib3_info,
+        "chardet": chardet_info,
+        "charset_normalizer": charset_normalizer_info,
+        "cryptography": cryptography_info,
+        "idna": idna_info,
+        "requests": {
+            "version": requests_version,
         },
     }

@@ -131,5 +130,5 @@ def main():
     print(json.dumps(info(), sort_keys=True, indent=2))


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/hooks.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/hooks.py
old mode 100755
new mode 100644
index 7a51f21..d181ba2
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/hooks.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/hooks.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 """
 requests.hooks
 ~~~~~~~~~~~~~~
@@ -11,12 +9,13 @@
 ``response``:
     The response generated from a Request.
 """
-HOOKS = ['response']
+HOOKS = ["response"]


 def default_hooks():
     return {event: [] for event in HOOKS}


 # TODO: response is the only one
@@ -25,7 +24,7 @@ def dispatch_hook(key, hooks, hook_data, **kwargs):
     hooks = hooks or {}
     hooks = hooks.get(key)
     if hooks:
-        if hasattr(hooks, '__call__'):
+        if hasattr(hooks, "__call__"):
             hooks = [hooks]
         for hook in hooks:
             _hook_data = hook(hook_data, **kwargs)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/models.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/models.py
old mode 100755
new mode 100644
index aa6fb86..7e15228
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/models.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/models.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 """
 requests.models
 ~~~~~~~~~~~~~~~
@@ -8,46 +6,72 @@
 """

 import datetime
-import sys

 # Import encoding now, to avoid implicit import later.
 # Implicit import within threads may cause LookupError when standard library is in a ZIP,
 # such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
-import encodings.idna
+import encodings.idna  # noqa: F401
+from io import UnsupportedOperation

+from urllib3.exceptions import (
+    DecodeError,
+    LocationParseError,
+    ProtocolError,
+    ReadTimeoutError,
+    SSLError,
+)
 from urllib3.fields import RequestField
 from urllib3.filepost import encode_multipart_formdata
 from urllib3.util import parse_url
-from urllib3.exceptions import (
-    DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
-
-from io import UnsupportedOperation
-from .hooks import default_hooks
-from .structures import CaseInsensitiveDict
-from .auth import HTTPBasicAuth
-from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
-from .exceptions import (
-    HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
-    ContentDecodingError, ConnectionError, StreamConsumedError, InvalidJSONError)

 from ._internal_utils import to_native_string, unicode_is_ascii
-from .utils import (
-    guess_filename, get_auth_from_url, requote_uri,
-    stream_decode_response_unicode, to_key_val_list, parse_header_links,
-    iter_slices, guess_json_utf, super_len, check_header_validity)
+from .auth import HTTPBasicAuth
 from .compat import (
-    Callable, Mapping,
-    cookielib, urlunparse, urlsplit, urlencode, str, bytes,
-    is_py2, chardet, builtin_str, basestring)
+    Callable,
+    JSONDecodeError,
+    Mapping,
+    basestring,
+    builtin_str,
+    chardet,
+    cookielib,
+)
 from .compat import json as complexjson
+from .compat import urlencode, urlsplit, urlunparse
+from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header
+from .exceptions import (
+    ChunkedEncodingError,
+    ConnectionError,
+    ContentDecodingError,
+    HTTPError,
+    InvalidJSONError,
+    InvalidURL,
+)
+from .exceptions import JSONDecodeError as RequestsJSONDecodeError
+from .exceptions import MissingSchema
+from .exceptions import SSLError as RequestsSSLError
+from .exceptions import StreamConsumedError
+from .hooks import default_hooks
 from .status_codes import codes
+from .structures import CaseInsensitiveDict
+from .utils import (
+    check_header_validity,
+    get_auth_from_url,
+    guess_filename,
+    guess_json_utf,
+    iter_slices,
+    parse_header_links,
+    requote_uri,
+    stream_decode_response_unicode,
+    super_len,
+    to_key_val_list,
+)

 #: The set of HTTP status codes that indicate an automatically
 #: processable redirect.
 REDIRECT_STATI = (
-    codes.moved,               # 301
-    codes.found,               # 302
-    codes.other,               # 303
+    codes.moved,  # 301
+    codes.found,  # 302
+    codes.other,  # 303
     codes.temporary_redirect,  # 307
     codes.permanent_redirect,  # 308
 )
@@ -57,7 +81,7 @@
 ITER_CHUNK_SIZE = 512


-class RequestEncodingMixin(object):
+class RequestEncodingMixin:
     @property
     def path_url(self):
         """Build the path URL to use."""
@@ -68,16 +92,16 @@ def path_url(self):

         path = p.path
         if not path:
-            path = '/'
+            path = "/"

         url.append(path)

         query = p.query
         if query:
-            url.append('?')
+            url.append("?")
             url.append(query)

-        return ''.join(url)
+        return "".join(url)

     @staticmethod
     def _encode_params(data):
@@ -90,18 +114,21 @@ def _encode_params(data):

         if isinstance(data, (str, bytes)):
             return data
-        elif hasattr(data, 'read'):
+        elif hasattr(data, "read"):
             return data
-        elif hasattr(data, '__iter__'):
+        elif hasattr(data, "__iter__"):
             result = []
             for k, vs in to_key_val_list(data):
-                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
+                if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
                     vs = [vs]
                 for v in vs:
                     if v is not None:
                         result.append(
-                            (k.encode('utf-8') if isinstance(k, str) else k,
-                             v.encode('utf-8') if isinstance(v, str) else v))
+                            (
+                                k.encode("utf-8") if isinstance(k, str) else k,
+                                v.encode("utf-8") if isinstance(v, str) else v,
+                            )
+                        )
             return urlencode(result, doseq=True)
         else:
             return data
@@ -116,7 +143,7 @@ def _encode_files(files, data):
         The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
         or 4-tuples (filename, fileobj, contentype, custom_headers).
         """
-        if (not files):
+        if not files:
             raise ValueError("Files must be provided.")
         elif isinstance(data, basestring):
             raise ValueError("Data must not be a string.")
@@ -126,7 +153,7 @@
         files = to_key_val_list(files or {})

         for field, val in fields:
-            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
+            if isinstance(val, basestring) or not hasattr(val, "__iter__"):
                 val = [val]
             for v in val:
                 if v is not None:
@@ -135,8 +162,13 @@
                         v = str(v)

                     new_fields.append(
-                        (field.decode('utf-8') if isinstance(field, bytes) else field,
-                         v.encode('utf-8') if isinstance(v, str) else v))
+                        (
+                            field.decode("utf-8")
+                            if isinstance(field, bytes)
+                            else field,
+                            v.encode("utf-8") if isinstance(v, str) else v,
+                        )
+                    )

         for (k, v) in files:
             # support for explicit filename
@@ -155,7 +187,7 @@
             if isinstance(fp, (str, bytes, bytearray)):
                 fdata = fp
-            elif hasattr(fp, 'read'):
+            elif hasattr(fp, "read"):
                 fdata = fp.read()
             elif fp is None:
                 continue
@@ -171,16 +203,16 @@
         return body, content_type


-class RequestHooksMixin(object):
+class RequestHooksMixin:
     def register_hook(self, event, hook):
         """Properly register a hook."""

         if event not in self.hooks:
-            raise ValueError('Unsupported event specified, with event name "%s"' % (event))
+            raise ValueError(f'Unsupported event specified, with event name "{event}"')

         if isinstance(hook, Callable):
             self.hooks[event].append(hook)
-        elif hasattr(hook, '__iter__'):
+        elif hasattr(hook, "__iter__"):
             self.hooks[event].extend(h for h in hook if isinstance(h, Callable))

     def deregister_hook(self, event, hook):
@@ -223,9 +255,19 @@ class Request(RequestHooksMixin):
     """

-    def __init__(self,
-            method=None, url=None, headers=None, files=None, data=None,
-            params=None, auth=None, cookies=None, hooks=None, json=None):
+    def __init__(
+        self,
+        method=None,
+        url=None,
+        headers=None,
+        files=None,
+        data=None,
+        params=None,
+        auth=None,
+        cookies=None,
+        hooks=None,
+        json=None,
+    ):

         # Default empty dicts for dict params.
         data = [] if data is None else data
@@ -249,7 +291,7 @@ def __init__(self,
         self.cookies = cookies

     def __repr__(self):
-        return '<Request [%s]>' % (self.method)
+        return f"<Request [{self.method}]>"

     def prepare(self):
         """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
@@ -307,9 +349,19 @@ def __init__(self):
         #: integer denoting starting position of a readable file-like body.
         self._body_position = None

-    def prepare(self,
-            method=None, url=None, headers=None, files=None, data=None,
-            params=None, auth=None, cookies=None, hooks=None, json=None):
+    def prepare(
+        self,
+        method=None,
+        url=None,
+        headers=None,
+        files=None,
+        data=None,
+        params=None,
+        auth=None,
+        cookies=None,
+        hooks=None,
+        json=None,
+    ):
         """Prepares the entire request with the given parameters."""

         self.prepare_method(method)
@@ -326,7 +378,7 @@ def prepare(self,
         self.prepare_hooks(hooks)

     def __repr__(self):
-        return '<PreparedRequest [%s]>' % (self.method)
+        return f"<PreparedRequest [{self.method}]>"

     def copy(self):
         p = PreparedRequest()
@@ -350,7 +402,7 @@ def _get_idna_encoded_host(host):
         import idna

         try:
-            host = idna.encode(host, uts46=True).decode('utf-8')
+            host = idna.encode(host, uts46=True).decode("utf-8")
         except idna.IDNAError:
             raise UnicodeError
         return host
@@ -363,9 +415,9 @@ def prepare_url(self, url, params):
         #: on python 3.x.
         #: https://github.com/psf/requests/pull/2238
         if isinstance(url, bytes):
-            url = url.decode('utf8')
+            url = url.decode("utf8")
         else:
-            url = unicode(url) if is_py2 else str(url)
+            url = str(url)

         # Remove leading whitespaces from url
         url = url.lstrip()
@@ -373,7 +425,7 @@ def prepare_url(self, url, params):
         # Don't do any URL preparation for non-HTTP schemes like `mailto`,
         # `data` etc to work around exceptions from `url_parse`, which
         # handles RFC 3986 only.
-        if ':' in url and not url.lower().startswith('http'):
+        if ":" in url and not url.lower().startswith("http"):
             self.url = url
             return
@@ -384,13 +436,13 @@
             raise InvalidURL(*e.args)

         if not scheme:
-            error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
-            error = error.format(to_native_string(url, 'utf8'))
-
-            raise MissingSchema(error)
+            raise MissingSchema(
+                f"Invalid URL {url!r}: No scheme supplied. "
+                f"Perhaps you meant http://{url}?"
+            )

         if not host:
-            raise InvalidURL("Invalid URL %r: No host supplied" % url)
+            raise InvalidURL(f"Invalid URL {url!r}: No host supplied")

         # In general, we want to try IDNA encoding the hostname if the string contains
         # non-ASCII characters. This allows users to automatically get the correct IDNA
@@ -400,33 +452,21 @@
             try:
                 host = self._get_idna_encoded_host(host)
             except UnicodeError:
-                raise InvalidURL('URL has an invalid label.')
-        elif host.startswith(u'*'):
-            raise InvalidURL('URL has an invalid label.')
+                raise InvalidURL("URL has an invalid label.")
+        elif host.startswith(("*", ".")):
+            raise InvalidURL("URL has an invalid label.")

         # Carefully reconstruct the network location
-        netloc = auth or ''
+        netloc = auth or ""
         if netloc:
-            netloc += '@'
+            netloc += "@"
         netloc += host
         if port:
-            netloc += ':' + str(port)
+            netloc += f":{port}"

         # Bare domains aren't valid URLs.
        if not path:
-            path = '/'
-
-        if is_py2:
-            if isinstance(scheme, str):
-                scheme = scheme.encode('utf-8')
-            if isinstance(netloc, str):
-                netloc = netloc.encode('utf-8')
-            if isinstance(path, str):
-                path = path.encode('utf-8')
-            if isinstance(query, str):
-                query = query.encode('utf-8')
-            if isinstance(fragment, str):
-                fragment = fragment.encode('utf-8')
+            path = "/"

         if isinstance(params, (str, bytes)):
             params = to_native_string(params)
@@ -434,7 +474,7 @@
         enc_params = self._encode_params(params)
         if enc_params:
             if query:
-                query = '%s&%s' % (query, enc_params)
+                query = f"{query}&{enc_params}"
             else:
                 query = enc_params
@@ -465,20 +505,22 @@ def prepare_body(self, data, files, json=None):

         if not data and json is not None:
             # urllib3 requires a bytes-like body. Python 2's json.dumps
             # provides this natively, but Python 3 gives a Unicode string.
-            content_type = 'application/json'
+            content_type = "application/json"

             try:
-                body = complexjson.dumps(json, allow_nan=False)
+                body = complexjson.dumps(json, allow_nan=False)
             except ValueError as ve:
-                raise InvalidJSONError(ve, request=self)
+                raise InvalidJSONError(ve, request=self)

             if not isinstance(body, bytes):
-                body = body.encode('utf-8')
+                body = body.encode("utf-8")

-        is_stream = all([
-            hasattr(data, '__iter__'),
-            not isinstance(data, (basestring, list, tuple, Mapping))
-        ])
+        is_stream = all(
+            [
+                hasattr(data, "__iter__"),
+                not isinstance(data, (basestring, list, tuple, Mapping)),
+            ]
+        )

         if is_stream:
             try:
@@ -488,24 +530,26 @@

             body = data

-            if getattr(body, 'tell', None) is not None:
+            if getattr(body, "tell", None) is not None:
                 # Record the current file position before reading.
                 # This will allow us to rewind a file in the event
                 # of a redirect.
                 try:
                     self._body_position = body.tell()
-                except (IOError, OSError):
+                except OSError:
                     # This differentiates from None, allowing us to catch
                     # a failed `tell()` later when trying to rewind the body
                     self._body_position = object()

             if files:
-                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
+                raise NotImplementedError(
+                    "Streamed bodies and files are mutually exclusive."
+                )

             if length:
-                self.headers['Content-Length'] = builtin_str(length)
+                self.headers["Content-Length"] = builtin_str(length)
             else:
-                self.headers['Transfer-Encoding'] = 'chunked'
+                self.headers["Transfer-Encoding"] = "chunked"
         else:
             # Multi-part file uploads.
             if files:
@@ -513,16 +557,16 @@
             else:
                 if data:
                     body = self._encode_params(data)
-                    if isinstance(data, basestring) or hasattr(data, 'read'):
+                    if isinstance(data, basestring) or hasattr(data, "read"):
                         content_type = None
                     else:
-                        content_type = 'application/x-www-form-urlencoded'
+                        content_type = "application/x-www-form-urlencoded"

             self.prepare_content_length(body)

             # Add content-type if it wasn't explicitly provided.
-            if content_type and ('content-type' not in self.headers):
-                self.headers['Content-Type'] = content_type
+            if content_type and ("content-type" not in self.headers):
+                self.headers["Content-Type"] = content_type

         self.body = body

@@ -533,13 +577,16 @@ def prepare_content_length(self, body):
         if length:
             # If length exists, set it. Otherwise, we fallback
             # to Transfer-Encoding: chunked.
-            self.headers['Content-Length'] = builtin_str(length)
-        elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
+            self.headers["Content-Length"] = builtin_str(length)
+        elif (
+            self.method not in ("GET", "HEAD")
+            and self.headers.get("Content-Length") is None
+        ):
             # Set Content-Length to 0 for methods that can have a body
             # but don't provide one. (i.e. not GET or HEAD)
-            self.headers['Content-Length'] = '0'
+            self.headers["Content-Length"] = "0"

-    def prepare_auth(self, auth, url=''):
+    def prepare_auth(self, auth, url=""):
         """Prepares the given HTTP auth data."""

         # If no Auth is explicitly provided, extract it from the URL first.
@@ -579,7 +626,7 @@ def prepare_cookies(self, cookies):
         cookie_header = get_cookie_header(self._cookies, self)

         if cookie_header is not None:
-            self.headers['Cookie'] = cookie_header
+            self.headers["Cookie"] = cookie_header

     def prepare_hooks(self, hooks):
         """Prepares the given hooks."""
@@ -591,14 +638,22 @@ def prepare_hooks(self, hooks):
             self.register_hook(event, hooks[event])


-class Response(object):
+class Response:
     """The :class:`Response <Response>` object, which contains a server's
     response to an HTTP request.
     """

     __attrs__ = [
-        '_content', 'status_code', 'headers', 'url', 'history',
-        'encoding', 'reason', 'cookies', 'elapsed', 'request'
+        "_content",
+        "status_code",
+        "headers",
+        "url",
+        "history",
+        "encoding",
+        "reason",
+        "cookies",
+        "elapsed",
+        "request",
     ]

     def __init__(self):
@@ -667,11 +722,11 @@ def __setstate__(self, state):
             setattr(self, name, value)

         # pickled objects do not have .raw
-        setattr(self, '_content_consumed', True)
-        setattr(self, 'raw', None)
+        setattr(self, "_content_consumed", True)
+        setattr(self, "raw", None)

     def __repr__(self):
-        return '<Response [%s]>' % (self.status_code)
+        return f"<Response [{self.status_code}]>"

     def __bool__(self):
         """Returns True if :attr:`status_code` is less than 400.
@@ -717,12 +772,15 @@ def is_redirect(self):
         """True if this Response is a well-formed HTTP redirect that could have
         been processed automatically (by :meth:`Session.resolve_redirects`).
         """
-        return ('location' in self.headers and self.status_code in REDIRECT_STATI)
+        return "location" in self.headers and self.status_code in REDIRECT_STATI

     @property
     def is_permanent_redirect(self):
         """True if this Response one of the permanent versions of redirect."""
-        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
+        return "location" in self.headers and self.status_code in (
+            codes.moved_permanently,
+            codes.permanent_redirect,
+        )

     @property
     def next(self):
@@ -732,7 +790,7 @@ def next(self):
     @property
     def apparent_encoding(self):
         """The apparent encoding, provided by the charset_normalizer or chardet libraries."""
-        return chardet.detect(self.content)['encoding']
+        return chardet.detect(self.content)["encoding"]

     def iter_content(self, chunk_size=1, decode_unicode=False):
         """Iterates over the response data.  When stream=True is set on the
@@ -753,7 +811,7 @@ def iter_content(self, chunk_size=1, decode_unicode=False):

         def generate():
             # Special case for urllib3.
-            if hasattr(self.raw, 'stream'):
+            if hasattr(self.raw, "stream"):
                 try:
                     for chunk in self.raw.stream(chunk_size, decode_content=True):
                         yield chunk
@@ -763,6 +821,8 @@ def generate():
                     raise ContentDecodingError(e)
                 except ReadTimeoutError as e:
                     raise ConnectionError(e)
+                except SSLError as e:
+                    raise RequestsSSLError(e)
             else:
                 # Standard file-like object.
                 while True:
@@ -776,7 +836,9 @@ def generate():
         if self._content_consumed and isinstance(self._content, bool):
             raise StreamConsumedError()
         elif chunk_size is not None and not isinstance(chunk_size, int):
-            raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
+            raise TypeError(
+                f"chunk_size must be an int, it is instead a {type(chunk_size)}."
+            )
         # simulate reading small chunks of the content
         reused_chunks = iter_slices(self._content, chunk_size)

@@ -789,7 +851,9 @@ def generate():

         return chunks

-    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
+    def iter_lines(
+        self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
+    ):
         """Iterates over the response data, one line at a time.  When
         stream=True is set on the request, this avoids reading the
         content at once into memory for large responses.
@@ -799,7 +863,9 @@ def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter

         pending = None

-        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
+        for chunk in self.iter_content(
+            chunk_size=chunk_size, decode_unicode=decode_unicode
+        ):

             if pending is not None:
                 chunk = pending + chunk
@@ -814,8 +880,7 @@
             else:
                 pending = None

-            for line in lines:
-                yield line
+            yield from lines

         if pending is not None:
             yield pending
@@ -827,13 +892,12 @@ def content(self):
         if self._content is False:
             # Read the contents.
             if self._content_consumed:
-                raise RuntimeError(
-                    'The content for this response was already consumed')
+                raise RuntimeError("The content for this response was already consumed")

             if self.status_code == 0 or self.raw is None:
                 self._content = None
             else:
-                self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
+                self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""

         self._content_consumed = True
         # don't need to release the connection; that's been handled by urllib3
@@ -858,7 +922,7 @@ def text(self):
         encoding = self.encoding

         if not self.content:
-            return str('')
+            return ""

         # Fallback to auto-detected encoding.
         if self.encoding is None:
@@ -866,7 +930,7 @@

         # Decode unicode from given encoding.
         try:
-            content = str(self.content, encoding, errors='replace')
+            content = str(self.content, encoding, errors="replace")
         except (LookupError, TypeError):
             # A LookupError is raised if the encoding was not found which could
             # indicate a misspelling or similar mistake.
             #
             # A TypeError can be raised if encoding is None
             #
             # So we try blindly encoding.
-            content = str(self.content, errors='replace')
+            content = str(self.content, errors="replace")

         return content

@@ -882,12 +946,8 @@ def json(self, **kwargs):
         r"""Returns the json-encoded content of a response, if any.

         :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
-        :raises simplejson.JSONDecodeError: If the response body does not
-            contain valid json and simplejson is installed.
-        :raises json.JSONDecodeError: If the response body does not contain
-            valid json and simplejson is not installed on Python 3.
-        :raises ValueError: If the response body does not contain valid
-            json and simplejson is not installed on Python 2.
+        :raises requests.exceptions.JSONDecodeError: If the response body does not
+            contain valid json.
""" if not self.encoding and self.content and len(self.content) > 3: @@ -898,56 +958,65 @@ def json(self, **kwargs): encoding = guess_json_utf(self.content) if encoding is not None: try: - return complexjson.loads( - self.content.decode(encoding), **kwargs - ) + return complexjson.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. pass - return complexjson.loads(self.text, **kwargs) + except JSONDecodeError as e: + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + try: + return complexjson.loads(self.text, **kwargs) + except JSONDecodeError as e: + # Catch JSON-related errors and raise as requests.JSONDecodeError + # This aliases json.JSONDecodeError and simplejson.JSONDecodeError + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) @property def links(self): """Returns the parsed header links of the response, if any.""" - header = self.headers.get('link') + header = self.headers.get("link") - # l = MultiDict() - l = {} + resolved_links = {} if header: links = parse_header_links(header) for link in links: - key = link.get('rel') or link.get('url') - l[key] = link + key = link.get("rel") or link.get("url") + resolved_links[key] = link - return l + return resolved_links def raise_for_status(self): """Raises :class:`HTTPError`, if one occurred.""" - http_error_msg = '' + http_error_msg = "" if isinstance(self.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. (See PR #3538) try: - reason = self.reason.decode('utf-8') + reason = self.reason.decode("utf-8") except UnicodeDecodeError: - reason = self.reason.decode('iso-8859-1') + reason = self.reason.decode("iso-8859-1") else: reason = self.reason if 400 <= self.status_code < 500: - http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) + http_error_msg = ( + f"{self.status_code} Client Error: {reason} for url: {self.url}" + ) elif 500 <= self.status_code < 600: - http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) + http_error_msg = ( + f"{self.status_code} Server Error: {reason} for url: {self.url}" + ) if http_error_msg: raise HTTPError(http_error_msg, response=self) @@ -961,6 +1030,6 @@ def close(self): if not self._content_consumed: self.raw.close() - release_conn = getattr(self.raw, 'release_conn', None) + release_conn = getattr(self.raw, "release_conn", None) if release_conn is not None: release_conn() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/packages.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/packages.py old mode 100755 new mode 100644 index 00196bf..77c45c9 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/packages.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/packages.py @@ -3,24 +3,26 @@ try: import chardet except ImportError: - import charset_normalizer as chardet import warnings - warnings.filterwarnings('ignore', 'Trying to detect', module='charset_normalizer') + import charset_normalizer as chardet + + warnings.filterwarnings("ignore", "Trying to detect", module="charset_normalizer") # This code exists for backwards compatibility reasons. # I don't like it either. 
Just look the other way. :) -for package in ('urllib3', 'idna'): +for package in ("urllib3", "idna"): locals()[package] = __import__(package) # This traversal is apparently necessary such that the identities are # preserved (requests.packages.urllib3.* is urllib3.*) for mod in list(sys.modules): - if mod == package or mod.startswith(package + '.'): - sys.modules['requests.packages.' + mod] = sys.modules[mod] + if mod == package or mod.startswith(f"{package}."): + sys.modules[f"requests.packages.{mod}"] = sys.modules[mod] target = chardet.__name__ for mod in list(sys.modules): - if mod == target or mod.startswith(target + '.'): - sys.modules['requests.packages.' + target.replace(target, 'chardet')] = sys.modules[mod] + if mod == target or mod.startswith(f"{target}."): + target = target.replace(target, "chardet") + sys.modules[f"requests.packages.{target}"] = sys.modules[mod] # Kinda cool, though, right? diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/sessions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/sessions.py old mode 100755 new mode 100644 index ae4bcc8..6cb3b4d --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/sessions.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/sessions.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.sessions ~~~~~~~~~~~~~~~~~ @@ -10,39 +8,52 @@ import os import sys import time -from datetime import timedelta from collections import OrderedDict +from datetime import timedelta +from ._internal_utils import to_native_string +from .adapters import HTTPAdapter from .auth import _basic_auth_str -from .compat import cookielib, is_py3, urljoin, urlparse, Mapping +from .compat import Mapping, cookielib, urljoin, urlparse from .cookies import ( - cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) -from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT -from .hooks import default_hooks, dispatch_hook -from ._internal_utils import to_native_string -from .utils import to_key_val_list, default_headers, DEFAULT_PORTS + RequestsCookieJar, + cookiejar_from_dict, + extract_cookies_to_jar, + merge_cookies, +) from .exceptions import ( - TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) - -from .structures import CaseInsensitiveDict -from .adapters import HTTPAdapter - -from .utils import ( - requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, - get_auth_from_url, rewind_body + ChunkedEncodingError, + ContentDecodingError, + InvalidSchema, + TooManyRedirects, ) - -from .status_codes import codes +from .hooks import default_hooks, dispatch_hook # formerly defined here, reexposed here for backward compatibility -from .models import REDIRECT_STATI +from .models import ( # noqa: F401 + DEFAULT_REDIRECT_LIMIT, + REDIRECT_STATI, + PreparedRequest, + Request, +) +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( # noqa: F401 + DEFAULT_PORTS, + default_headers, + get_auth_from_url, + get_environ_proxies, + get_netrc_auth, + requote_uri, + resolve_proxies, + rewind_body, + should_bypass_proxies, + to_key_val_list, +) # Preferred clock, based on which one is more accurate on a given system. -if sys.platform == 'win32': - try: # Python 3.4+ - preferred_clock = time.perf_counter - except AttributeError: # Earlier than Python 3. 
- preferred_clock = time.clock +if sys.platform == "win32": + preferred_clock = time.perf_counter else: preferred_clock = time.time @@ -61,8 +72,7 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict): # Bypass if not a dictionary (e.g. verify) if not ( - isinstance(session_setting, Mapping) and - isinstance(request_setting, Mapping) + isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting @@ -84,17 +94,16 @@ def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ - if session_hooks is None or session_hooks.get('response') == []: + if session_hooks is None or session_hooks.get("response") == []: return request_hooks - if request_hooks is None or request_hooks.get('response') == []: + if request_hooks is None or request_hooks.get("response") == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) -class SessionRedirectMixin(object): - +class SessionRedirectMixin: def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will @@ -104,16 +113,15 @@ def get_redirect_target(self, resp): # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: - location = resp.headers['location'] + location = resp.headers["location"] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. - if is_py3: - location = location.encode('latin1') - return to_native_string(location, 'utf8') + location = location.encode("latin1") + return to_native_string(location, "utf8") return None def should_strip_auth(self, old_url, new_url): @@ -126,23 +134,40 @@ def should_strip_auth(self, old_url, new_url): # ports. This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. - if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) - and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): + if ( + old_parsed.scheme == "http" + and old_parsed.port in (80, None) + and new_parsed.scheme == "https" + and new_parsed.port in (443, None) + ): return False # Handle default port usage corresponding to scheme. changed_port = old_parsed.port != new_parsed.port changed_scheme = old_parsed.scheme != new_parsed.scheme default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) - if (not changed_scheme and old_parsed.port in default_port - and new_parsed.port in default_port): + if ( + not changed_scheme + and old_parsed.port in default_port + and new_parsed.port in default_port + ): return False # Standard case: root URI must match return changed_port or changed_scheme - def resolve_redirects(self, resp, req, stream=False, timeout=None, - verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): + def resolve_redirects( + self, + resp, + req, + stream=False, + timeout=None, + verify=True, + cert=None, + proxies=None, + yield_requests=False, + **adapter_kwargs, + ): """Receives a Response. 
Returns a generator of Responses or Requests.""" hist = [] # keep track of history @@ -163,19 +188,21 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None, resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: - raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp) + raise TooManyRedirects( + f"Exceeded {self.max_redirects} redirects.", response=resp + ) # Release the connection back into the pool. resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) - if url.startswith('//'): + if url.startswith("//"): parsed_rurl = urlparse(resp.url) - url = ':'.join([to_native_string(parsed_rurl.scheme), url]) + url = ":".join([to_native_string(parsed_rurl.scheme), url]) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) - if parsed.fragment == '' and previous_fragment: + if parsed.fragment == "" and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment @@ -194,15 +221,18 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None, self.rebuild_method(prepared_request, resp) # https://github.com/psf/requests/issues/1084 - if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): + if resp.status_code not in ( + codes.temporary_redirect, + codes.permanent_redirect, + ): # https://github.com/psf/requests/issues/3490 - purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') + purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers - headers.pop('Cookie', None) + headers.pop("Cookie", None) # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared @@ -218,9 +248,8 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None, # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. - rewindable = ( - prepared_request._body_position is not None and - ('Content-Length' in headers or 'Transfer-Encoding' in headers) + rewindable = prepared_request._body_position is not None and ( + "Content-Length" in headers or "Transfer-Encoding" in headers ) # Attempt to rewind consumed file-like object. @@ -242,7 +271,7 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None, cert=cert, proxies=proxies, allow_redirects=False, - **adapter_kwargs + **adapter_kwargs, ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) @@ -259,17 +288,18 @@ def rebuild_auth(self, prepared_request, response): headers = prepared_request.headers url = prepared_request.url - if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): + if "Authorization" in headers and self.should_strip_auth( + response.request.url, url + ): # If we get redirected to a new host, we should strip out any # authentication headers. - del headers['Authorization'] + del headers["Authorization"] # .netrc might have more auth for us on our new host. 
new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) - def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by @@ -282,24 +312,12 @@ def rebuild_proxies(self, prepared_request, proxies): :rtype: dict """ - proxies = proxies if proxies is not None else {} headers = prepared_request.headers - url = prepared_request.url - scheme = urlparse(url).scheme - new_proxies = proxies.copy() - no_proxy = proxies.get('no_proxy') + scheme = urlparse(prepared_request.url).scheme + new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) - bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) - if self.trust_env and not bypass_proxy: - environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) - - proxy = environ_proxies.get(scheme, environ_proxies.get('all')) - - if proxy: - new_proxies.setdefault(scheme, proxy) - - if 'Proxy-Authorization' in headers: - del headers['Proxy-Authorization'] + if "Proxy-Authorization" in headers: + del headers["Proxy-Authorization"] try: username, password = get_auth_from_url(new_proxies[scheme]) @@ -307,7 +325,7 @@ def rebuild_proxies(self, prepared_request, proxies): username, password = None, None if username and password: - headers['Proxy-Authorization'] = _basic_auth_str(username, password) + headers["Proxy-Authorization"] = _basic_auth_str(username, password) return new_proxies @@ -318,18 +336,18 @@ def rebuild_method(self, prepared_request, response): method = prepared_request.method # https://tools.ietf.org/html/rfc7231#section-6.4.4 - if response.status_code == codes.see_other and method != 'HEAD': - method = 'GET' + if response.status_code == codes.see_other and method != "HEAD": + method = "GET" # Do what the browsers do, despite standards... # First, turn 302s into GETs. - if response.status_code == codes.found and method != 'HEAD': - method = 'GET' + if response.status_code == codes.found and method != "HEAD": + method = "GET" # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. - if response.status_code == codes.moved and method == 'POST': - method = 'GET' + if response.status_code == codes.moved and method == "POST": + method = "GET" prepared_request.method = method @@ -354,9 +372,18 @@ class Session(SessionRedirectMixin): """ __attrs__ = [ - 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', - 'cert', 'adapters', 'stream', 'trust_env', - 'max_redirects', + "headers", + "cookies", + "auth", + "proxies", + "hooks", + "params", + "verify", + "cert", + "adapters", + "stream", + "trust_env", + "max_redirects", ] def __init__(self): @@ -418,8 +445,8 @@ def __init__(self): # Default connection adapters. self.adapters = OrderedDict() - self.mount('https://', HTTPAdapter()) - self.mount('http://', HTTPAdapter()) + self.mount("https://", HTTPAdapter()) + self.mount("http://", HTTPAdapter()) def __enter__(self): return self @@ -445,7 +472,8 @@ def prepare_request(self, request): # Merge with session cookies merged_cookies = merge_cookies( - merge_cookies(RequestsCookieJar(), self.cookies), cookies) + merge_cookies(RequestsCookieJar(), self.cookies), cookies + ) # Set environment's basic authentication if not explicitly set. 
auth = request.auth @@ -459,7 +487,9 @@ def prepare_request(self, request): files=request.files, data=request.data, json=request.json, - headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), + headers=merge_setting( + request.headers, self.headers, dict_class=CaseInsensitiveDict + ), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, @@ -467,10 +497,25 @@ def prepare_request(self, request): ) return p - def request(self, method, url, - params=None, data=None, headers=None, cookies=None, files=None, - auth=None, timeout=None, allow_redirects=True, proxies=None, - hooks=None, stream=None, verify=None, cert=None, json=None): + def request( + self, + method, + url, + params=None, + data=None, + headers=None, + cookies=None, + files=None, + auth=None, + timeout=None, + allow_redirects=True, + proxies=None, + hooks=None, + stream=None, + verify=None, + cert=None, + json=None, + ): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. @@ -506,7 +551,7 @@ def request(self, method, url, ``False``, requests will accept any TLS certificate presented by the server, and will ignore hostname mismatches and/or expired certificates, which will make your application vulnerable to - man-in-the-middle (MitM) attacks. Setting verify to ``False`` + man-in-the-middle (MitM) attacks.  Setting verify to ``False`` may be useful during local development or testing. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. @@ -535,8 +580,8 @@ def request(self, method, url, # Send the request. send_kwargs = { - 'timeout': timeout, - 'allow_redirects': allow_redirects, + "timeout": timeout, + "allow_redirects": allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) @@ -551,8 +596,8 @@ def get(self, url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', True) - return self.request('GET', url, **kwargs) + kwargs.setdefault("allow_redirects", True) + return self.request("GET", url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. @@ -562,8 +607,8 @@ def options(self, url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', True) - return self.request('OPTIONS', url, **kwargs) + kwargs.setdefault("allow_redirects", True) + return self.request("OPTIONS", url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. @@ -573,8 +618,8 @@ def head(self, url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', False) - return self.request('HEAD', url, **kwargs) + kwargs.setdefault("allow_redirects", False) + return self.request("HEAD", url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. @@ -587,7 +632,7 @@ def post(self, url, data=None, json=None, **kwargs): :rtype: requests.Response """ - return self.request('POST', url, data=data, json=json, **kwargs) + return self.request("POST", url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object.
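# --------------------------------------------------------------------------
# Illustrative sketch (not part of the vendored diff; assumes network access
# to httpbin.org): the verb helpers above differ only in the defaults they
# pass to Session.request — GET and OPTIONS set allow_redirects=True, while
# HEAD sets allow_redirects=False, so a HEAD surfaces the 3xx itself.
import requests

with requests.Session() as s:
    r = s.get("https://httpbin.org/redirect/1")   # 302 is followed to /get
    h = s.head("https://httpbin.org/redirect/1")  # 302 is returned as-is
    print(r.status_code, r.url)  # 200 https://httpbin.org/get
    print(h.status_code)         # 302
# --------------------------------------------------------------------------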
@@ -599,7 +644,7 @@ def put(self, url, data=None, **kwargs): :rtype: requests.Response """ - return self.request('PUT', url, data=data, **kwargs) + return self.request("PUT", url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. @@ -611,7 +656,7 @@ def patch(self, url, data=None, **kwargs): :rtype: requests.Response """ - return self.request('PATCH', url, data=data, **kwargs) + return self.request("PATCH", url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. @@ -621,7 +666,7 @@ def delete(self, url, **kwargs): :rtype: requests.Response """ - return self.request('DELETE', url, **kwargs) + return self.request("DELETE", url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. @@ -630,19 +675,20 @@ def send(self, request, **kwargs): """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. - kwargs.setdefault('stream', self.stream) - kwargs.setdefault('verify', self.verify) - kwargs.setdefault('cert', self.cert) - kwargs.setdefault('proxies', self.rebuild_proxies(request, self.proxies)) + kwargs.setdefault("stream", self.stream) + kwargs.setdefault("verify", self.verify) + kwargs.setdefault("cert", self.cert) + if "proxies" not in kwargs: + kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if isinstance(request, Request): - raise ValueError('You can only send PreparedRequests.') + raise ValueError("You can only send PreparedRequests.") # Set up variables needed for resolve_redirects and dispatching of hooks - allow_redirects = kwargs.pop('allow_redirects', True) - stream = kwargs.get('stream') + allow_redirects = kwargs.pop("allow_redirects", True) + stream = kwargs.get("stream") hooks = request.hooks # Get the appropriate adapter to use @@ -659,7 +705,7 @@ def send(self, request, **kwargs): r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks - r = dispatch_hook('response', hooks, r, **kwargs) + r = dispatch_hook("response", hooks, r, **kwargs) # Persist cookies if r.history: @@ -689,7 +735,9 @@ def send(self, request, **kwargs): # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: - r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) + r._next = next( + self.resolve_redirects(r, request, yield_requests=True, **kwargs) + ) except StopIteration: pass @@ -707,16 +755,19 @@ def merge_environment_settings(self, url, proxies, stream, verify, cert): # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. - no_proxy = proxies.get('no_proxy') if proxies is not None else None + no_proxy = proxies.get("no_proxy") if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) - # Look for requests environment configuration and be compatible - # with cURL. + # Look for requests environment configuration + # and be compatible with cURL. if verify is True or verify is None: - verify = (os.environ.get('REQUESTS_CA_BUNDLE') or - os.environ.get('CURL_CA_BUNDLE')) + verify = ( + os.environ.get("REQUESTS_CA_BUNDLE") + or os.environ.get("CURL_CA_BUNDLE") + or verify + ) # Merge all the kwargs. 
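# Illustrative sketch of the CA-bundle fallback just above (not from the
# patch; the bundle path below is an assumed example): with trust_env enabled
# and verify left at True/None, an environment bundle takes over, while the
# trailing `or verify` now preserves the caller's value when neither
# variable is set.
#
#   import os, requests
#   os.environ["REQUESTS_CA_BUNDLE"] = "/etc/ssl/certs/ca-certificates.crt"
#   s = requests.Session()
#   settings = s.merge_environment_settings(
#       "https://example.com", {}, None, True, None
#   )
#   settings["verify"]  # -> "/etc/ssl/certs/ca-certificates.crt"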
proxies = merge_setting(proxies, self.proxies) @@ -724,8 +775,7 @@ def merge_environment_settings(self, url, proxies, stream, verify, cert): verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) - return {'verify': verify, 'proxies': proxies, 'stream': stream, - 'cert': cert} + return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} def get_adapter(self, url): """ @@ -739,7 +789,7 @@ def get_adapter(self, url): return adapter # Nothing matches :-/ - raise InvalidSchema("No connection adapters were found for {!r}".format(url)) + raise InvalidSchema(f"No connection adapters were found for {url!r}") def close(self): """Closes all adapters and as such the session""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/status_codes.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/status_codes.py old mode 100755 new mode 100644 index d80a7cd..4bd072b --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/status_codes.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/status_codes.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary @@ -23,101 +21,108 @@ from .structures import LookupDict _codes = { - # Informational. - 100: ('continue',), - 101: ('switching_protocols',), - 102: ('processing',), - 103: ('checkpoint',), - 122: ('uri_too_long', 'request_uri_too_long'), - 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), - 201: ('created',), - 202: ('accepted',), - 203: ('non_authoritative_info', 'non_authoritative_information'), - 204: ('no_content',), - 205: ('reset_content', 'reset'), - 206: ('partial_content', 'partial'), - 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), - 208: ('already_reported',), - 226: ('im_used',), - + 100: ("continue",), + 101: ("switching_protocols",), + 102: ("processing",), + 103: ("checkpoint",), + 122: ("uri_too_long", "request_uri_too_long"), + 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), + 201: ("created",), + 202: ("accepted",), + 203: ("non_authoritative_info", "non_authoritative_information"), + 204: ("no_content",), + 205: ("reset_content", "reset"), + 206: ("partial_content", "partial"), + 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), + 208: ("already_reported",), + 226: ("im_used",), # Redirection. - 300: ('multiple_choices',), - 301: ('moved_permanently', 'moved', '\\o-'), - 302: ('found',), - 303: ('see_other', 'other'), - 304: ('not_modified',), - 305: ('use_proxy',), - 306: ('switch_proxy',), - 307: ('temporary_redirect', 'temporary_moved', 'temporary'), - 308: ('permanent_redirect', - 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 - + 300: ("multiple_choices",), + 301: ("moved_permanently", "moved", "\\o-"), + 302: ("found",), + 303: ("see_other", "other"), + 304: ("not_modified",), + 305: ("use_proxy",), + 306: ("switch_proxy",), + 307: ("temporary_redirect", "temporary_moved", "temporary"), + 308: ( + "permanent_redirect", + "resume_incomplete", + "resume", + ), # "resume" and "resume_incomplete" to be removed in 3.0 # Client Error. 
- 400: ('bad_request', 'bad'), - 401: ('unauthorized',), - 402: ('payment_required', 'payment'), - 403: ('forbidden',), - 404: ('not_found', '-o-'), - 405: ('method_not_allowed', 'not_allowed'), - 406: ('not_acceptable',), - 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), - 408: ('request_timeout', 'timeout'), - 409: ('conflict',), - 410: ('gone',), - 411: ('length_required',), - 412: ('precondition_failed', 'precondition'), - 413: ('request_entity_too_large',), - 414: ('request_uri_too_large',), - 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), - 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), - 417: ('expectation_failed',), - 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), - 421: ('misdirected_request',), - 422: ('unprocessable_entity', 'unprocessable'), - 423: ('locked',), - 424: ('failed_dependency', 'dependency'), - 425: ('unordered_collection', 'unordered'), - 426: ('upgrade_required', 'upgrade'), - 428: ('precondition_required', 'precondition'), - 429: ('too_many_requests', 'too_many'), - 431: ('header_fields_too_large', 'fields_too_large'), - 444: ('no_response', 'none'), - 449: ('retry_with', 'retry'), - 450: ('blocked_by_windows_parental_controls', 'parental_controls'), - 451: ('unavailable_for_legal_reasons', 'legal_reasons'), - 499: ('client_closed_request',), - + 400: ("bad_request", "bad"), + 401: ("unauthorized",), + 402: ("payment_required", "payment"), + 403: ("forbidden",), + 404: ("not_found", "-o-"), + 405: ("method_not_allowed", "not_allowed"), + 406: ("not_acceptable",), + 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), + 408: ("request_timeout", "timeout"), + 409: ("conflict",), + 410: ("gone",), + 411: ("length_required",), + 412: ("precondition_failed", "precondition"), + 413: ("request_entity_too_large",), + 414: ("request_uri_too_large",), + 415: ("unsupported_media_type", "unsupported_media", "media_type"), + 416: ( + "requested_range_not_satisfiable", + "requested_range", + "range_not_satisfiable", + ), + 417: ("expectation_failed",), + 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), + 421: ("misdirected_request",), + 422: ("unprocessable_entity", "unprocessable"), + 423: ("locked",), + 424: ("failed_dependency", "dependency"), + 425: ("unordered_collection", "unordered"), + 426: ("upgrade_required", "upgrade"), + 428: ("precondition_required", "precondition"), + 429: ("too_many_requests", "too_many"), + 431: ("header_fields_too_large", "fields_too_large"), + 444: ("no_response", "none"), + 449: ("retry_with", "retry"), + 450: ("blocked_by_windows_parental_controls", "parental_controls"), + 451: ("unavailable_for_legal_reasons", "legal_reasons"), + 499: ("client_closed_request",), # Server Error. 
- 500: ('internal_server_error', 'server_error', '/o\\', '✗'), - 501: ('not_implemented',), - 502: ('bad_gateway',), - 503: ('service_unavailable', 'unavailable'), - 504: ('gateway_timeout',), - 505: ('http_version_not_supported', 'http_version'), - 506: ('variant_also_negotiates',), - 507: ('insufficient_storage',), - 509: ('bandwidth_limit_exceeded', 'bandwidth'), - 510: ('not_extended',), - 511: ('network_authentication_required', 'network_auth', 'network_authentication'), + 500: ("internal_server_error", "server_error", "/o\\", "✗"), + 501: ("not_implemented",), + 502: ("bad_gateway",), + 503: ("service_unavailable", "unavailable"), + 504: ("gateway_timeout",), + 505: ("http_version_not_supported", "http_version"), + 506: ("variant_also_negotiates",), + 507: ("insufficient_storage",), + 509: ("bandwidth_limit_exceeded", "bandwidth"), + 510: ("not_extended",), + 511: ("network_authentication_required", "network_auth", "network_authentication"), } -codes = LookupDict(name='status_codes') +codes = LookupDict(name="status_codes") + def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) - if not title.startswith(('\\', '/')): + if not title.startswith(("\\", "/")): setattr(codes, title.upper(), code) def doc(code): - names = ', '.join('``%s``' % n for n in _codes[code]) - return '* %d: %s' % (code, names) + names = ", ".join(f"``{n}``" for n in _codes[code]) + return "* %d: %s" % (code, names) global __doc__ - __doc__ = (__doc__ + '\n' + - '\n'.join(doc(code) for code in sorted(_codes)) - if __doc__ is not None else None) + __doc__ = ( + __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) + if __doc__ is not None + else None + ) + _init() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/structures.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/structures.py old mode 100755 new mode 100644 index 8ee0ba7..188e13e --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/structures.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/structures.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.structures ~~~~~~~~~~~~~~~~~~~ @@ -64,11 +62,7 @@ def __len__(self): def lower_items(self): """Like iteritems(), but with all lowercase keys.""" - return ( - (lowerkey, keyval[1]) - for (lowerkey, keyval) - in self._store.items() - ) + return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) def __eq__(self, other): if isinstance(other, Mapping): @@ -91,10 +85,10 @@ class LookupDict(dict): def __init__(self, name=None): self.name = name - super(LookupDict, self).__init__() + super().__init__() def __repr__(self): - return '<lookup \'%s\'>' % (self.name) + return f"<lookup '{self.name}'>" def __getitem__(self, key): # We allow fall-through here, so values default to None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/utils.py old mode 100755 new mode 100644 index dbb02a0..ad53583 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/utils.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests/utils.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.utils ~~~~~~~~~~~~~~ @@ -20,27 +18,46 @@ import warnings import zipfile from collections import OrderedDict -from urllib3.util import make_headers -from .__version__ import __version__ +from urllib3.util import
make_headers, parse_url + from . import certs +from .__version__ import __version__ + # to_native_string is unused here, but imported here for backwards compatibility -from ._internal_utils import to_native_string +from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401 +from .compat import ( + Mapping, + basestring, + bytes, + getproxies, + getproxies_environment, + integer_types, +) from .compat import parse_http_list as _parse_list_header from .compat import ( - quote, urlparse, bytes, str, unquote, getproxies, - proxy_bypass, urlunparse, basestring, integer_types, is_py3, - proxy_bypass_environment, getproxies_environment, Mapping) + proxy_bypass, + proxy_bypass_environment, + quote, + str, + unquote, + urlparse, + urlunparse, +) from .cookies import cookiejar_from_dict -from .structures import CaseInsensitiveDict from .exceptions import ( - InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) + FileModeWarning, + InvalidHeader, + InvalidURL, + UnrewindableBodyError, +) +from .structures import CaseInsensitiveDict -NETRC_FILES = ('.netrc', '_netrc') +NETRC_FILES = (".netrc", "_netrc") DEFAULT_CA_BUNDLE_PATH = certs.where() -DEFAULT_PORTS = {'http': 80, 'https': 443} +DEFAULT_PORTS = {"http": 80, "https": 443} # Ensure that ', ' is used to preserve previous delimiter behavior. DEFAULT_ACCEPT_ENCODING = ", ".join( @@ -48,28 +65,25 @@ ) -if sys.platform == 'win32': +if sys.platform == "win32": # provide a proxy_bypass version on Windows without DNS lookups def proxy_bypass_registry(host): try: - if is_py3: - import winreg - else: - import _winreg as winreg + import winreg except ImportError: return False try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + internetSettings = winreg.OpenKey( + winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", + ) # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it - proxyEnable = int(winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0]) + proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0]) # ProxyOverride is almost always a string - proxyOverride = winreg.QueryValueEx(internetSettings, - 'ProxyOverride')[0] - except OSError: + proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0] + except (OSError, ValueError): return False if not proxyEnable or not proxyOverride: return False @@ -77,15 +91,15 @@ def proxy_bypass_registry(host): # make a check value list from the registry entry: replace the # '<local>' string by the localhost entry and the corresponding # canonical entry. - proxyOverride = proxyOverride.split(';') + proxyOverride = proxyOverride.split(";") # now check if we match one of the registry values. for test in proxyOverride: - if test == '<local>': - if '.' not in host: + if test == "<local>": + if "." 
not in host: return True - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char + test = test.replace(".", r"\.") # mask dots + test = test.replace("*", r".*") # change glob sequence + test = test.replace("?", r".") # change glob char if re.match(test, host, re.I): return True return False @@ -105,7 +119,7 @@ def proxy_bypass(host): # noqa def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" - if hasattr(d, 'items'): + if hasattr(d, "items"): d = d.items() return d @@ -115,37 +129,42 @@ def super_len(o): total_length = None current_position = 0 - if hasattr(o, '__len__'): + if hasattr(o, "__len__"): total_length = len(o) - elif hasattr(o, 'len'): + elif hasattr(o, "len"): total_length = o.len - elif hasattr(o, 'fileno'): + elif hasattr(o, "fileno"): try: fileno = o.fileno() - except io.UnsupportedOperation: + except (io.UnsupportedOperation, AttributeError): + # AttributeError is a surprising exception, seeing as how we've just checked + # that `hasattr(o, 'fileno')`. It happens for objects obtained via + # `Tarfile.extractfile()`, per issue 5229. pass else: total_length = os.fstat(fileno).st_size # Having used fstat to determine the file length, we need to # confirm that this file was opened up in binary mode. - if 'b' not in o.mode: - warnings.warn(( - "Requests has determined the content-length for this " - "request using the binary size of the file: however, the " - "file has been opened in text mode (i.e. without the 'b' " - "flag in the mode). This may lead to an incorrect " - "content-length. In Requests 3.0, support will be removed " - "for files in text mode."), - FileModeWarning + if "b" not in o.mode: + warnings.warn( + ( + "Requests has determined the content-length for this " + "request using the binary size of the file: however, the " + "file has been opened in text mode (i.e. without the 'b' " + "flag in the mode). This may lead to an incorrect " + "content-length. In Requests 3.0, support will be removed " + "for files in text mode." + ), + FileModeWarning, ) - if hasattr(o, 'tell'): + if hasattr(o, "tell"): try: current_position = o.tell() - except (OSError, IOError): + except OSError: # This can happen in some weird situations, such as when the file # is actually a special file descriptor like stdin. 
In this # instance, we don't know what the length is, so set it to zero and @@ -153,8 +172,8 @@ def super_len(o): if total_length is not None: current_position = total_length else: - if hasattr(o, 'seek') and total_length is None: - # StringIO and BytesIO have seek but no useable fileno + if hasattr(o, "seek") and total_length is None: + # StringIO and BytesIO have seek but no usable fileno try: # seek to end of file o.seek(0, 2) @@ -163,7 +182,7 @@ def super_len(o): # seek back to current position to support # partially read file-like objects o.seek(current_position or 0) - except (OSError, IOError): + except OSError: total_length = 0 if total_length is None: @@ -175,14 +194,14 @@ def super_len(o): def get_netrc_auth(url, raise_errors=False): """Returns the Requests tuple auth for a given url from netrc.""" - netrc_file = os.environ.get('NETRC') + netrc_file = os.environ.get("NETRC") if netrc_file is not None: netrc_locations = (netrc_file,) else: - netrc_locations = ('~/{}'.format(f) for f in NETRC_FILES) + netrc_locations = (f"~/{f}" for f in NETRC_FILES) try: - from netrc import netrc, NetrcParseError + from netrc import NetrcParseError, netrc netrc_path = None @@ -207,18 +226,18 @@ def get_netrc_auth(url, raise_errors=False): # Strip port numbers from netloc. This weird `if...encode`` dance is # used for Python 3.2, which doesn't support unicode literals. - splitstr = b':' + splitstr = b":" if isinstance(url, str): - splitstr = splitstr.decode('ascii') + splitstr = splitstr.decode("ascii") host = ri.netloc.split(splitstr)[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password - login_i = (0 if _netrc[0] else 1) + login_i = 0 if _netrc[0] else 1 return (_netrc[login_i], _netrc[2]) - except (NetrcParseError, IOError): + except (NetrcParseError, OSError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth unless explicitly asked to raise errors. 
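# Illustrative aside (not upstream code; host and credentials below are
# made-up examples): a ~/.netrc entry such as
#
#   machine api.example.com
#     login alice
#     password s3cr3t
#
# makes the lookup above resolve credentials for a matching host:
#
#   from requests.utils import get_netrc_auth
#   get_netrc_auth("https://api.example.com/v1")  # -> ("alice", "s3cr3t")
#
# Parse and permission errors land in this handler and are skipped silently
# unless the caller passed raise_errors=True.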
if raise_errors: @@ -231,9 +250,8 @@ def get_netrc_auth(url, raise_errors=False): def guess_filename(obj): """Tries to guess the filename of the given object.""" - name = getattr(obj, 'name', None) - if (name and isinstance(name, basestring) and name[0] != '<' and - name[-1] != '>'): + name = getattr(obj, "name", None) + if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": return os.path.basename(name) @@ -251,7 +269,11 @@ def extract_zipped_paths(path): archive, member = os.path.split(path) while archive and not os.path.exists(archive): archive, prefix = os.path.split(archive) - member = '/'.join([prefix, member]) + if not prefix: + # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), + # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users + break + member = "/".join([prefix, member]) if not zipfile.is_zipfile(archive): return path @@ -262,7 +284,7 @@ def extract_zipped_paths(path): # we have a valid zip archive and a valid member of that archive tmp = tempfile.gettempdir() - extracted_path = os.path.join(tmp, member.split('/')[-1]) + extracted_path = os.path.join(tmp, member.split("/")[-1]) if not os.path.exists(extracted_path): # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition with atomic_open(extracted_path) as file_handler: @@ -273,12 +295,11 @@ def extract_zipped_paths(path): @contextlib.contextmanager def atomic_open(filename): """Write a file to the disk in an atomic fashion""" - replacer = os.rename if sys.version_info[0] == 2 else os.replace tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) try: - with os.fdopen(tmp_descriptor, 'wb') as tmp_handler: + with os.fdopen(tmp_descriptor, "wb") as tmp_handler: yield tmp_handler - replacer(tmp_name, filename) + os.replace(tmp_name, filename) except BaseException: os.remove(tmp_name) raise @@ -306,7 +327,7 @@ def from_key_val_list(value): return None if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') + raise ValueError("cannot encode objects that are not 2-tuples") return OrderedDict(value) @@ -332,7 +353,7 @@ def to_key_val_list(value): return None if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') + raise ValueError("cannot encode objects that are not 2-tuples") if isinstance(value, Mapping): value = value.items() @@ -397,10 +418,10 @@ def parse_dict_header(value): """ result = {} for item in _parse_list_header(value): - if '=' not in item: + if "=" not in item: result[item] = None continue - name, value = item.split('=', 1) + name, value = item.split("=", 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value @@ -428,8 +449,8 @@ def unquote_header_value(value, is_filename=False): # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != '\\\\': - return value.replace('\\\\', '\\').replace('\\"', '"') + if not is_filename or value[:2] != "\\\\": + return value.replace("\\\\", "\\").replace('\\"', '"') return value @@ -464,19 +485,24 @@ def get_encodings_from_content(content): :param content: bytestring to extract encodings from. 
""" - warnings.warn(( - 'In requests 3.0, get_encodings_from_content will be removed. For ' - 'more information, please see the discussion on issue #2266. (This' - ' warning should only appear once.)'), - DeprecationWarning) + warnings.warn( + ( + "In requests 3.0, get_encodings_from_content will be removed. For " + "more information, please see the discussion on issue #2266. (This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) charset_re = re.compile(r']', flags=re.I) pragma_re = re.compile(r']', flags=re.I) xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') - return (charset_re.findall(content) + - pragma_re.findall(content) + - xml_re.findall(content)) + return ( + charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content) + ) def _parse_content_type_header(header): @@ -487,7 +513,7 @@ def _parse_content_type_header(header): parameters """ - tokens = header.split(';') + tokens = header.split(";") content_type, params = tokens[0].strip(), tokens[1:] params_dict = {} items_to_strip = "\"' " @@ -499,7 +525,7 @@ def _parse_content_type_header(header): index_of_equals = param.find("=") if index_of_equals != -1: key = param[:index_of_equals].strip(items_to_strip) - value = param[index_of_equals + 1:].strip(items_to_strip) + value = param[index_of_equals + 1 :].strip(items_to_strip) params_dict[key.lower()] = value return content_type, params_dict @@ -511,38 +537,37 @@ def get_encoding_from_headers(headers): :rtype: str """ - content_type = headers.get('content-type') + content_type = headers.get("content-type") if not content_type: return None content_type, params = _parse_content_type_header(content_type) - if 'charset' in params: - return params['charset'].strip("'\"") + if "charset" in params: + return params["charset"].strip("'\"") - if 'text' in content_type: - return 'ISO-8859-1' + if "text" in content_type: + return "ISO-8859-1" - if 'application/json' in content_type: + if "application/json" in content_type: # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset - return 'utf-8' + return "utf-8" def stream_decode_response_unicode(iterator, r): - """Stream decodes a iterator.""" + """Stream decodes an iterator.""" if r.encoding is None: - for item in iterator: - yield item + yield from iterator return - decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') + decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace") for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv - rv = decoder.decode(b'', final=True) + rv = decoder.decode(b"", final=True) if rv: yield rv @@ -553,7 +578,7 @@ def iter_slices(string, slice_length): if slice_length is None or slice_length <= 0: slice_length = len(string) while pos < len(string): - yield string[pos:pos + slice_length] + yield string[pos : pos + slice_length] pos += slice_length @@ -569,11 +594,14 @@ def get_unicode_from_response(r): :rtype: str """ - warnings.warn(( - 'In requests 3.0, get_unicode_from_response will be removed. For ' - 'more information, please see the discussion on issue #2266. (This' - ' warning should only appear once.)'), - DeprecationWarning) + warnings.warn( + ( + "In requests 3.0, get_unicode_from_response will be removed. For " + "more information, please see the discussion on issue #2266. 
(This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) tried_encodings = [] @@ -588,14 +616,15 @@ def get_unicode_from_response(r): # Fall back: try: - return str(r.content, encoding, errors='replace') + return str(r.content, encoding, errors="replace") except TypeError: return r.content # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" +) def unquote_unreserved(uri): @@ -604,22 +633,22 @@ def unquote_unreserved(uri): :rtype: str """ - parts = uri.split('%') + parts = uri.split("%") for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: - raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) + raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: - parts[i] = '%' + parts[i] + parts[i] = f"%{parts[i]}" else: - parts[i] = '%' + parts[i] - return ''.join(parts) + parts[i] = f"%{parts[i]}" + return "".join(parts) def requote_uri(uri): @@ -652,10 +681,10 @@ def address_in_network(ip, net): :rtype: bool """ - ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] - netaddr, bits = net.split('/') - netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] - network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask + ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] + netaddr, bits = net.split("/") + netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask) @@ -666,8 +695,8 @@ def dotted_netmask(mask): :rtype: str """ - bits = 0xffffffff ^ (1 << 32 - mask) - 1 - return socket.inet_ntoa(struct.pack('>I', bits)) + bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack(">I", bits)) def is_ipv4_address(string_ip): @@ -676,7 +705,7 @@ def is_ipv4_address(string_ip): """ try: socket.inet_aton(string_ip) - except socket.error: + except OSError: return False return True @@ -687,9 +716,9 @@ def is_valid_cidr(string_network): :rtype: bool """ - if string_network.count('/') == 1: + if string_network.count("/") == 1: try: - mask = int(string_network.split('/')[1]) + mask = int(string_network.split("/")[1]) except ValueError: return False @@ -697,8 +726,8 @@ def is_valid_cidr(string_network): return False try: - socket.inet_aton(string_network.split('/')[0]) - except socket.error: + socket.inet_aton(string_network.split("/")[0]) + except OSError: return False else: return False @@ -735,13 +764,14 @@ def should_bypass_proxies(url, no_proxy): """ # Prioritize lowercase environment variables over uppercase # to keep a consistent behaviour with other http projects (curl, wget). - get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) + def get_proxy(key): + return os.environ.get(key) or os.environ.get(key.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy_arg = no_proxy if no_proxy is None: - no_proxy = get_proxy('no_proxy') + no_proxy = get_proxy("no_proxy") parsed = urlparse(url) if parsed.hostname is None: @@ -751,9 +781,7 @@ def should_bypass_proxies(url, no_proxy): if no_proxy: # We need to check whether we match here. 
We need to see if we match # the end of the hostname, both with and without the port. - no_proxy = ( - host for host in no_proxy.replace(' ', '').split(',') if host - ) + no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) if is_ipv4_address(parsed.hostname): for proxy_ip in no_proxy: @@ -767,7 +795,7 @@ def should_bypass_proxies(url, no_proxy): else: host_with_port = parsed.hostname if parsed.port: - host_with_port += ':{}'.format(parsed.port) + host_with_port += f":{parsed.port}" for host in no_proxy: if parsed.hostname.endswith(host) or host_with_port.endswith(host): @@ -775,7 +803,7 @@ def should_bypass_proxies(url, no_proxy): # to apply the proxies on this URL. return True - with set_environ('no_proxy', no_proxy_arg): + with set_environ("no_proxy", no_proxy_arg): # parsed.hostname can be `None` in cases such as a file URI. try: bypass = proxy_bypass(parsed.hostname) @@ -809,13 +837,13 @@ def select_proxy(url, proxies): proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: - return proxies.get(urlparts.scheme, proxies.get('all')) + return proxies.get(urlparts.scheme, proxies.get("all")) proxy_keys = [ - urlparts.scheme + '://' + urlparts.hostname, + urlparts.scheme + "://" + urlparts.hostname, urlparts.scheme, - 'all://' + urlparts.hostname, - 'all', + "all://" + urlparts.hostname, + "all", ] proxy = None for proxy_key in proxy_keys: @@ -826,25 +854,54 @@ def select_proxy(url, proxies): return proxy +def resolve_proxies(request, proxies, trust_env=True): + """This method takes proxy information from a request and configuration + input to resolve a mapping of target proxies. This will consider settings + such a NO_PROXY to strip proxy configurations. + + :param request: Request or PreparedRequest + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + :param trust_env: Boolean declaring whether to trust environment configs + + :rtype: dict + """ + proxies = proxies if proxies is not None else {} + url = request.url + scheme = urlparse(url).scheme + no_proxy = proxies.get("no_proxy") + new_proxies = proxies.copy() + + if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): + environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) + + proxy = environ_proxies.get(scheme, environ_proxies.get("all")) + + if proxy: + new_proxies.setdefault(scheme, proxy) + return new_proxies + + def default_user_agent(name="python-requests"): """ Return a string representing the default user agent. 
:rtype: str """ - return '%s/%s' % (name, __version__) + return f"{name}/{__version__}" def default_headers(): """ :rtype: requests.structures.CaseInsensitiveDict """ - return CaseInsensitiveDict({ - 'User-Agent': default_user_agent(), - 'Accept-Encoding': DEFAULT_ACCEPT_ENCODING, - 'Accept': '*/*', - 'Connection': 'keep-alive', - }) + return CaseInsensitiveDict( + { + "User-Agent": default_user_agent(), + "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, + "Accept": "*/*", + "Connection": "keep-alive", + } + ) def parse_header_links(value): @@ -857,23 +914,23 @@ def parse_header_links(value): links = [] - replace_chars = ' \'"' + replace_chars = " '\"" value = value.strip(replace_chars) if not value: return links - for val in re.split(', *<', value): + for val in re.split(", *<", value): try: - url, params = val.split(';', 1) + url, params = val.split(";", 1) except ValueError: - url, params = val, '' + url, params = val, "" - link = {'url': url.strip('<> \'"')} + link = {"url": url.strip("<> '\"")} - for param in params.split(';'): + for param in params.split(";"): try: - key, value = param.split('=') + key, value = param.split("=") except ValueError: break @@ -885,7 +942,7 @@ def parse_header_links(value): # Null bytes; no need to recreate these on each call to guess_json_utf -_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 +_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 @@ -899,25 +956,25 @@ def guess_json_utf(data): # determine the encoding. Also detect a BOM, if present. sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return 'utf-32' # BOM included + return "utf-32" # BOM included if sample[:3] == codecs.BOM_UTF8: - return 'utf-8-sig' # BOM included, MS style (discouraged) + return "utf-8-sig" # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return 'utf-16' # BOM included + return "utf-16" # BOM included nullcount = sample.count(_null) if nullcount == 0: - return 'utf-8' + return "utf-8" if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return 'utf-16-be' + if sample[::2] == _null2: # 1st and 3rd are null + return "utf-16-be" if sample[1::2] == _null2: # 2nd and 4th are null - return 'utf-16-le' + return "utf-16-le" # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: - return 'utf-32-be' + return "utf-32-be" if sample[1:] == _null3: - return 'utf-32-le' + return "utf-32-le" # Did not detect a valid UTF-32 ascii-range character return None @@ -928,15 +985,27 @@ def prepend_scheme_if_needed(url, new_scheme): :rtype: str """ - scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) - - # urlparse is a finicky beast, and sometimes decides that there isn't a - # netloc present. Assume that it's being over-cautious, and switch netloc - # and path if urlparse decided there was no netloc. + parsed = parse_url(url) + scheme, auth, host, port, path, query, fragment = parsed + + # A defect in urlparse determines that there isn't a netloc present in some + # urls. We previously assumed parsing was overly cautious, and swapped the + # netloc and path. Due to a lack of tests on the original defect, this is + # maintained with parse_url for backwards compatibility. 
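# Behavior sketch (illustrative, not part of the diff) — the parse_url-based
# version keeps the old netloc/path swap semantics described above:
#
#   from requests.utils import prepend_scheme_if_needed
#   prepend_scheme_if_needed("example.com/path", "https")
#   # -> "https://example.com/path"
#   prepend_scheme_if_needed("//example.com", "http")
#   # -> "http://example.com"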
+ netloc = parsed.netloc if not netloc: netloc, path = path, netloc - return urlunparse((scheme, netloc, path, params, query, fragment)) + if auth: + # parse_url doesn't provide the netloc with auth + # so we'll add it ourselves. + netloc = "@".join([auth, netloc]) + if scheme is None: + scheme = new_scheme + if path is None: + path = "" + + return urlunparse((scheme, netloc, path, "", query, fragment)) def get_auth_from_url(url): @@ -950,35 +1019,36 @@ def get_auth_from_url(url): try: auth = (unquote(parsed.username), unquote(parsed.password)) except (AttributeError, TypeError): - auth = ('', '') + auth = ("", "") return auth -# Moved outside of function to avoid recompile every call -_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') -_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') - - def check_header_validity(header): - """Verifies that header value is a string which doesn't contain - leading whitespace or return characters. This prevents unintended - header injection. + """Verifies that header parts don't contain leading whitespace + reserved characters, or return characters. :param header: tuple, in the format (name, value). """ name, value = header - if isinstance(value, bytes): - pat = _CLEAN_HEADER_REGEX_BYTE - else: - pat = _CLEAN_HEADER_REGEX_STR - try: - if not pat.match(value): - raise InvalidHeader("Invalid return character or leading space in header: %s" % name) - except TypeError: - raise InvalidHeader("Value for header {%s: %s} must be of type str or " - "bytes, not %s" % (name, value, type(value))) + for part in header: + if type(part) not in HEADER_VALIDATORS: + raise InvalidHeader( + f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be " + f"of type str or bytes, not {type(part)}" + ) + + _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0]) + _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1]) + + +def _validate_header_part(header_part, header_kind, validator): + if not validator.match(header_part): + raise InvalidHeader( + f"Invalid leading whitespace, reserved character(s), or return" + f"character(s) in header {header_kind}: {header_part!r}" + ) def urldefragauth(url): @@ -993,21 +1063,24 @@ def urldefragauth(url): if not netloc: netloc, path = path, netloc - netloc = netloc.rsplit('@', 1)[-1] + netloc = netloc.rsplit("@", 1)[-1] - return urlunparse((scheme, netloc, path, params, query, '')) + return urlunparse((scheme, netloc, path, params, query, "")) def rewind_body(prepared_request): """Move file pointer back to its recorded starting position so it can be read again on redirect. """ - body_seek = getattr(prepared_request.body, 'seek', None) - if body_seek is not None and isinstance(prepared_request._body_position, integer_types): + body_seek = getattr(prepared_request.body, "seek", None) + if body_seek is not None and isinstance( + prepared_request._body_position, integer_types + ): try: body_seek(prepared_request._body_position) - except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect.") + except OSError: + raise UnrewindableBodyError( + "An error occurred when rewinding request body for redirect." 
+ ) else: raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/METADATA deleted file mode 100644 index 48678ba..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/METADATA +++ /dev/null @@ -1,142 +0,0 @@ -Metadata-Version: 2.1 -Name: setuptools -Version: 60.7.1 -Summary: Easily download, build, install, upgrade, and uninstall Python packages -Home-page: https://github.com/pypa/setuptools -Author: Python Packaging Authority -Author-email: distutils-sig@python.org -License: UNKNOWN -Project-URL: Documentation, https://setuptools.pypa.io/ -Keywords: CPAN PyPI distutils eggs package management -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: System :: Archiving :: Packaging -Classifier: Topic :: System :: Systems Administration -Classifier: Topic :: Utilities -Requires-Python: >=3.7 -License-File: LICENSE -Provides-Extra: certs -Provides-Extra: docs -Requires-Dist: sphinx ; extra == 'docs' -Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs' -Requires-Dist: rst.linker (>=1.9) ; extra == 'docs' -Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs' -Requires-Dist: pygments-github-lexers (==0.0.5) ; extra == 'docs' -Requires-Dist: sphinx-favicon ; extra == 'docs' -Requires-Dist: sphinx-inline-tabs ; extra == 'docs' -Requires-Dist: sphinxcontrib-towncrier ; extra == 'docs' -Requires-Dist: furo ; extra == 'docs' -Provides-Extra: ssl -Provides-Extra: testing -Requires-Dist: pytest (>=6) ; extra == 'testing' -Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing' -Requires-Dist: pytest-flake8 ; extra == 'testing' -Requires-Dist: pytest-cov ; extra == 'testing' -Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing' -Requires-Dist: pytest-perf ; extra == 'testing' -Requires-Dist: mock ; extra == 'testing' -Requires-Dist: flake8-2020 ; extra == 'testing' -Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing' -Requires-Dist: wheel ; extra == 'testing' -Requires-Dist: pip (>=19.1) ; extra == 'testing' -Requires-Dist: jaraco.envs (>=2.2) ; extra == 'testing' -Requires-Dist: pytest-xdist ; extra == 'testing' -Requires-Dist: sphinx (>=4.3.2) ; extra == 'testing' -Requires-Dist: jaraco.path (>=3.2.0) ; extra == 'testing' -Requires-Dist: build[virtualenv] ; extra == 'testing' -Requires-Dist: filelock (>=3.4.0) ; extra == 'testing' -Requires-Dist: pip-run (>=8.8) ; extra == 'testing' -Provides-Extra: testing-integration -Requires-Dist: pytest ; extra == 'testing-integration' -Requires-Dist: pytest-xdist ; extra == 'testing-integration' -Requires-Dist: pytest-enabler ; extra == 'testing-integration' -Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing-integration' -Requires-Dist: tomli ; extra == 'testing-integration' -Requires-Dist: wheel ; extra == 'testing-integration' -Requires-Dist: jaraco.path (>=3.2.0) ; extra == 'testing-integration' -Requires-Dist: jaraco.envs (>=2.2) ; extra == 'testing-integration' -Requires-Dist: build[virtualenv] ; extra == 'testing-integration' 
-Requires-Dist: filelock (>=3.4.0) ; extra == 'testing-integration' -Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing' -Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing' - -.. image:: https://raw.githubusercontent.com/pypa/setuptools/main/docs/images/banner-640x320.svg - :align: center - -| - -.. image:: https://img.shields.io/pypi/v/setuptools.svg - :target: `PyPI link`_ - -.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg - :target: `PyPI link`_ - -.. _PyPI link: https://pypi.org/project/setuptools - -.. image:: https://github.com/pypa/setuptools/workflows/tests/badge.svg - :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22 - :alt: tests - -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: Code style: Black - -.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg - :target: https://setuptools.pypa.io - -.. image:: https://img.shields.io/badge/skeleton-2021-informational - :target: https://blog.jaraco.com/skeleton - -.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white - :target: https://codecov.io/gh/pypa/setuptools - -.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat - :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme - -.. image:: https://img.shields.io/discord/803025117553754132 - :target: https://discord.com/channels/803025117553754132/815945031150993468 - :alt: Discord - -See the `Installation Instructions -`_ in the Python Packaging -User's Guide for instructions on installing, upgrading, and uninstalling -Setuptools. - -Questions and comments should be directed to `GitHub Discussions -`_. -Bug reports and especially tested patches may be -submitted directly to the `bug tracker -`_. - - -Code of Conduct -=============== - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and fora is expected to follow the -`PSF Code of Conduct `_. - - -For Enterprise -============== - -Available as part of the Tidelift Subscription. - -Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. - -`Learn more `_. - - -Security Contact -================ - -To report a security vulnerability, please use the -`Tidelift security contact `_. -Tidelift will coordinate the fix and disclosure. 
- - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/RECORD deleted file mode 100644 index 11d2475..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/RECORD +++ /dev/null @@ -1,175 +0,0 @@ -_distutils_hack/__init__.py,sha256=BsJ97Z52QqLV1epDKYkFDpGYey0Az9xcBHfyPP_aBrQ,6169 -_distutils_hack/override.py,sha256=Eu_s-NF6VIZ4Cqd0tbbA5wtWky2IZPNd8et6GLt1mzo,44 -distutils-precedence.pth,sha256=JjjOniUA5XKl4N5_rtZmHrVp0baW_LoHsN0iPaX10iQ,151 -pkg_resources/__init__.py,sha256=NssS5SQg4XHYfyygmvKvSjlzq9hhdwvr66DL0I5-bO8,107935 -pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701 -pkg_resources/_vendor/importlib_resources/__init__.py,sha256=evPm12kLgYqTm-pbzm60bOuumumT8IpBNWFp0uMyrzE,506 -pkg_resources/_vendor/importlib_resources/_adapters.py,sha256=o51tP2hpVtohP33gSYyAkGNpLfYDBqxxYsadyiRZi1E,4504 -pkg_resources/_vendor/importlib_resources/_common.py,sha256=iIxAaQhotSh6TLLUEfL_ynU2fzEeyHMz9JcL46mUhLg,2741 -pkg_resources/_vendor/importlib_resources/_compat.py,sha256=nFBCGMvImglrqgYkb9aPgOj68-h6xbw-ca94XOv1-zs,2706 -pkg_resources/_vendor/importlib_resources/_itertools.py,sha256=WCdJ1Gs_kNFwKENyIG7TO0Y434IWCu0zjVVSsSbZwU8,884 -pkg_resources/_vendor/importlib_resources/_legacy.py,sha256=TMLkx6aEM6U8xIREPXqGZrMbUhTiPUuPl6ESD7RdYj4,3494 -pkg_resources/_vendor/importlib_resources/abc.py,sha256=MvTJJXajbl74s36Gyeesf76egtbFnh-TMtzQMVhFWXo,3886 -pkg_resources/_vendor/importlib_resources/readers.py,sha256=_9QLGQ5AzrED3PY8S2Zf8V6yLR0-nqqYqtQmgleDJzY,3566 -pkg_resources/_vendor/importlib_resources/simple.py,sha256=xt0qhXbwt3bZ86zuaaKbTiE9A0mDbwu0saRjUq_pcY0,2836 -pkg_resources/_vendor/jaraco/context.py,sha256=7X1tpCLc5EN45iWGzGcsH0Unx62REIkvtRvglj0SiUA,5420 -pkg_resources/_vendor/jaraco/functools.py,sha256=eLwPh8FWY7rQ_cj1YxCekUkibTuerwyoJ_41H7Q7oWM,13515 -pkg_resources/_vendor/jaraco/text/__init__.py,sha256=cN55bFcceW4wTHG5ruv5IuEDRarP-4hBYX8zl94_c30,15526 -pkg_resources/_vendor/more_itertools/__init__.py,sha256=ZQYu_9H6stSG7viUgT32TFqslqcZwq82kWRZooKiI8Y,83 -pkg_resources/_vendor/more_itertools/more.py,sha256=jSrvV9BK-XKa4x7MPPp9yWYRDtRgR5h7yryEqHMU4mg,132578 -pkg_resources/_vendor/more_itertools/recipes.py,sha256=N6aCDwoIPvE-aiqpGU-nbFwqiM3X8MKRcxBM84naW88,18410 -pkg_resources/_vendor/packaging/__about__.py,sha256=IIRHpOsJlJSgkjq1UoeBoMTqhvNp3gN9FyMb5Kf8El4,661 -pkg_resources/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 -pkg_resources/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 -pkg_resources/_vendor/packaging/_musllinux.py,sha256=z5yeG1ygOPx4uUyLdqj-p8Dk5UBb5H_b0NIjW9yo8oA,4378 -pkg_resources/_vendor/packaging/_structures.py,sha256=TMiAgFbdUOPmIfDIfiHc3KFhSJ8kMjof2QS5I-2NyQ8,1629 -pkg_resources/_vendor/packaging/markers.py,sha256=gFSKoBTb0sKDw1v_apJy15lPr0v2mEvuEkfooTtcWx4,8496 -pkg_resources/_vendor/packaging/requirements.py,sha256=uJ4cjwm3_nrfHJLCcGU9mT5aw8SXfw8v1aBUD7OFuVs,4706 -pkg_resources/_vendor/packaging/specifiers.py,sha256=MZ-fYcNL3u7pNrt-6g2EQO7AbRXkjc-SPEYwXMQbLmc,30964 -pkg_resources/_vendor/packaging/tags.py,sha256=vGybAUQYlPKMcukzX_2e65fmafnFFuMbD25naYTEwtc,15710 -pkg_resources/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 
-pkg_resources/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 -pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 -pkg_resources/_vendor/zipp.py,sha256=ajztOH-9I7KA_4wqDYygtHa6xUBVZgFpmZ8FE74HHHI,8425 -pkg_resources/extern/__init__.py,sha256=inFoCK9jn_yRFqkbNSOxOYyZD0aB3awch_xtbwIW_-Y,2426 -setuptools-60.7.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -setuptools-60.7.1.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050 -setuptools-60.7.1.dist-info/METADATA,sha256=R58fixzx8xUNZ9hnL7-NYOaiFgsuqZOwK9ZX4BzsN1g,5924 -setuptools-60.7.1.dist-info/RECORD,, -setuptools-60.7.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 -setuptools-60.7.1.dist-info/entry_points.txt,sha256=wpnhLrbtyk4hZ1qCCw48cCSxoQPzULMhIuaFqsB7GxQ,2636 -setuptools-60.7.1.dist-info/top_level.txt,sha256=d9yL39v_W7qmKDDSH6sT4bE0j_Ls1M3P161OGgdsm4g,41 -setuptools/__init__.py,sha256=84lf54h_UbIkYxMn3ZqKyiaC9R86vnmIZUdTT-wxqeg,7461 -setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218 -setuptools/_distutils/__init__.py,sha256=3YtkfadGoU57VMEQFk2TNyMZVud1kDkakWQLhWg2Fm8,536 -setuptools/_distutils/_collections.py,sha256=s7zkSh7QUyJWEYSt5n10ouAZNDYvux8YCHnnY3k0wmQ,1330 -setuptools/_distutils/_msvccompiler.py,sha256=i8vRyUE3jqX5BLzVa3ZeLheyEoKN6KGJDJ44Tlz69ww,20809 -setuptools/_distutils/archive_util.py,sha256=qW-uiGwYexTvK5e-iSel_31Dshx-CqTanNPK6snwf98,8572 -setuptools/_distutils/bcppcompiler.py,sha256=gJqtPboJZl1llfCtjo_SVCE1DdjgK1H2rd0Vngz18QI,14885 -setuptools/_distutils/ccompiler.py,sha256=YbernlpGZZqKnfzZSfJ814fINca8cicZiUlBjyUPyaM,47644 -setuptools/_distutils/cmd.py,sha256=eco6LAGUtobLuPafuhmgKgkwRRL_WY8KJ4YeDCHpcls,18079 -setuptools/_distutils/command/__init__.py,sha256=2TA-rlNDlzeI-csbWHXFjGD8uOYqALMfyWOhT49nC6g,799 -setuptools/_distutils/command/bdist.py,sha256=2z4eudRl_n7m3lG9leL0IYqes4bsm8c0fxfZuiafjMg,5562 -setuptools/_distutils/command/bdist_dumb.py,sha256=BTur9jcIppyP7Piavjfsk7YjElqvxeYO2npUyPPOekc,4913 -setuptools/_distutils/command/bdist_msi.py,sha256=9Q1f4Pw4PRsg54fqHVgH8FZb78-yN4D5VlDC2KugX0A,35574 -setuptools/_distutils/command/bdist_rpm.py,sha256=gjOw22GhDSbcq0bdq25cTb-n6HWWm0bShLQad_mkJ4k,21537 -setuptools/_distutils/command/bdist_wininst.py,sha256=iGlaI-VfElHOneeczKHWnSN5a10-7IMcJaXuR1mdS3c,16030 -setuptools/_distutils/command/build.py,sha256=1AF-dxN_NlOEyoydBz19AwpeWYPSYCZvOLJSN_PdatY,5773 -setuptools/_distutils/command/build_clib.py,sha256=bgVTHh28eLQA2Gkw68amApd_j7qQBX4MTI-zTvAK_J4,8022 -setuptools/_distutils/command/build_ext.py,sha256=KgxpopuD6sqep0LsumMH15joWih0VdbnXpYm-ETNjoE,31612 -setuptools/_distutils/command/build_py.py,sha256=hXesMrH_epNj6K8SUtJdipgEis3EdICKeZ8VWe_ndck,16495 -setuptools/_distutils/command/build_scripts.py,sha256=urdn6wPxPMW5dLqpqFkZ8dqaFG1tf9TiAao6U9LCoEI,5963 -setuptools/_distutils/command/check.py,sha256=brOziX0PqvmfGYSUQlSA93m8b7T350uQwrOowwgNxqE,5630 -setuptools/_distutils/command/clean.py,sha256=2TCt47ru4hZZM0RfVfUYj5bbpicpGLP4Qhw5jBtvp9k,2776 -setuptools/_distutils/command/config.py,sha256=2aTjww3PwjMB8-ZibCe4P7B-qG1hM1gn_rJXYyxRz6c,13117 -setuptools/_distutils/command/install.py,sha256=dsCo4g_FG6SMsX_TIJQ-qaHWbgdjupBBie4-dfm793o,30075 -setuptools/_distutils/command/install_data.py,sha256=YhGOAwh3gJPqF7em5XA0rmpR42z1bLh80ooElzDyUvk,2822 -setuptools/_distutils/command/install_egg_info.py,sha256=WijZ7cHMAkNMMCwrZ--KoqV9M2RtLouU4-qSbiCwv70,2753 
-setuptools/_distutils/command/install_headers.py,sha256=XQ6idkbIDfr1ljXCOznuVUMvOFpHBn6cK0Wz9gIM2b4,1298 -setuptools/_distutils/command/install_lib.py,sha256=9AofR-MO9lAtjwwuukCptepOaJEKMZW2VHiyR5hU7HA,8397 -setuptools/_distutils/command/install_scripts.py,sha256=_CLUeQwGJRcY2kik7azPMn5IdtDCrjWdUvZ1khlG6ck,2017 -setuptools/_distutils/command/py37compat.py,sha256=qzRhhvTihqx_PZZt2ZYECxh1X3Oj255VqatzelYFAKw,671 -setuptools/_distutils/command/register.py,sha256=2jaq9968rt2puRVDBx1HbNiXv27uOk8idE_4lPf_3VM,11712 -setuptools/_distutils/command/sdist.py,sha256=qotJjAOzyhJjq2-oDImjNFrOtaSneEFDJTB-sEk1wnU,19005 -setuptools/_distutils/command/upload.py,sha256=BLO1w7eSAqsCjCLXtf_CRVSjwF1WmyOByGVGNdcQ8oY,7597 -setuptools/_distutils/config.py,sha256=dtHgblx9JhfyrKx1-J7Jlxw_f7s8ZbPFQii2UWMTZpY,4827 -setuptools/_distutils/core.py,sha256=0v7Emh9y0AW9o4AEjfVMhDxKzTFWFxUQn46spFSL56g,9282 -setuptools/_distutils/cygwinccompiler.py,sha256=eOMXcoZ_Reto4VQR_lWK-IylR1Lsi_RW0MMwCqhlvtU,14521 -setuptools/_distutils/debug.py,sha256=N6MrTAqK6l9SVk6tWweR108PM8Ol7qNlfyV-nHcLhsY,139 -setuptools/_distutils/dep_util.py,sha256=GuR9Iw_jzZRkyemJ5HX8rB_wRGxkIBcBm1qh54r7zhk,3491 -setuptools/_distutils/dir_util.py,sha256=UwhBOUTcV65GTwce4SPuTXR8Z8q3LYEcmttqcGb0bYo,7778 -setuptools/_distutils/dist.py,sha256=Biuf6ca8uiFfMScRFsYUKtb5neMPtxKxRtXn50_1f3U,50421 -setuptools/_distutils/errors.py,sha256=Yr6tKZGdzBoNi53vBtiq0UJ__X05CmxSdQJqOWaw6SY,3577 -setuptools/_distutils/extension.py,sha256=bTb3Q0CoevGKYv5dX1ls--Ln8tlB0-UEOsi9BwzlZ-s,10515 -setuptools/_distutils/fancy_getopt.py,sha256=OPxp2CxHi1Yp_d1D8JxW4Ueq9fC71tegQFaafh58GGU,17784 -setuptools/_distutils/file_util.py,sha256=0hUqfItN_x2DVihR0MHdA4KCMVCOO8VoByaFp_a6MDg,8148 -setuptools/_distutils/filelist.py,sha256=Z9f5hvepZnpniZ2IFmCnWIjdviWozs8sbARBhWajwoM,13407 -setuptools/_distutils/log.py,sha256=gZ0wCQvSMzrS_6ccOhtvceqigM77oT_GKB_nnooRIXo,1973 -setuptools/_distutils/msvc9compiler.py,sha256=XXs85TZO4quRKOdWUk6ylcD-1f_QAm4ceiHbiW5tH-k,30474 -setuptools/_distutils/msvccompiler.py,sha256=E-Fm6eLWnRr-sSRpHGOq6ezJ7YCnDuM3MV7_i9wiTRg,23531 -setuptools/_distutils/py35compat.py,sha256=-sk1vBIsOgH-AobjIYbK_OEjdJF_54Ul_D1EiE9XM_c,455 -setuptools/_distutils/py38compat.py,sha256=II7ddBxOijC7uNN4z_46HYUjwYTJYMNiLJoGTormZm0,212 -setuptools/_distutils/spawn.py,sha256=rbPiTTUGLIo0QtzS-n04HuzXh15ztDcUDJ94vXfheFY,3474 -setuptools/_distutils/sysconfig.py,sha256=MJ2B3ARCEVi0TqHjTTQVxIfKLsQ1BUF__cfT9nCjTb4,21103 -setuptools/_distutils/text_file.py,sha256=PsuAJeWdKJoLSV_6N6IpB5-0Pa84KzLUucJMFRazw3I,12483 -setuptools/_distutils/unixccompiler.py,sha256=u2Sfs6LRmqQux4nZW08GwDtoFMded6wYnkiaO2TvKC4,14538 -setuptools/_distutils/util.py,sha256=2WMXovrYPE-Uc06ckbV-mHgm9IKsaQ4nUdSGiK-9w_I,18537 -setuptools/_distutils/version.py,sha256=syRvPxuMQxnftpuIKeRE-2ELQ_ZMCwMJ-o8ie-lxdZo,13015 -setuptools/_distutils/versionpredicate.py,sha256=vx4ND3BtMgxFR9iZ4_t3WFa-NdIKxO8vtOd0twBppxc,5277 -setuptools/_imp.py,sha256=HmF91IbitRfsD5z-g4_wmcuH-RahyIONbPgiCOFgtzA,2392 -setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -setuptools/_vendor/more_itertools/__init__.py,sha256=C7sXffHTXM3P-iaLPPfqfmDoxOflQMJLcM7ed9p3jak,82 -setuptools/_vendor/more_itertools/more.py,sha256=DlZa8v6JihVwfQ5zHidOA-xDE0orcQIUyxVnCaUoDKE,117968 -setuptools/_vendor/more_itertools/recipes.py,sha256=UkNkrsZyqiwgLHANBTmvMhCvaNSvSNYhyOpz_Jc55DY,16256 -setuptools/_vendor/ordered_set.py,sha256=dbaCcs27dyN9gnMWGF5nA_BrVn6Q-NrjKYJpV9_fgBs,15130 
-setuptools/_vendor/packaging/__about__.py,sha256=IIRHpOsJlJSgkjq1UoeBoMTqhvNp3gN9FyMb5Kf8El4,661 -setuptools/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 -setuptools/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 -setuptools/_vendor/packaging/_musllinux.py,sha256=z5yeG1ygOPx4uUyLdqj-p8Dk5UBb5H_b0NIjW9yo8oA,4378 -setuptools/_vendor/packaging/_structures.py,sha256=TMiAgFbdUOPmIfDIfiHc3KFhSJ8kMjof2QS5I-2NyQ8,1629 -setuptools/_vendor/packaging/markers.py,sha256=lihRgqpZjLM-JW-vxlLPqU3kmVe79g9vypy1kxmTRuQ,8493 -setuptools/_vendor/packaging/requirements.py,sha256=Opd0FjqgdEiWkzBLyo1oLU0Dj01uIFwTAnAJQrr6j2A,4700 -setuptools/_vendor/packaging/specifiers.py,sha256=MZ-fYcNL3u7pNrt-6g2EQO7AbRXkjc-SPEYwXMQbLmc,30964 -setuptools/_vendor/packaging/tags.py,sha256=vGybAUQYlPKMcukzX_2e65fmafnFFuMbD25naYTEwtc,15710 -setuptools/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 -setuptools/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 -setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 -setuptools/archive_util.py,sha256=maJDbozRbDeSPw53VT0cb_IS3W0Ap73lJR8tX8RZDx0,7077 -setuptools/build_meta.py,sha256=hCU742vjgXHY6oKPYttBkie-n4DVNAJrUOgn0O_V3nc,10536 -setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752 -setuptools/cli-arm64.exe,sha256=o9amxowudZ98NvNWh_a2DRY8LhoIRqTAekxABqltiMc,137216 -setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/command/__init__.py,sha256=e-8TJOikUe3St0fw2b2p9u5EDdSxl5zHUBJJKifbcQ8,217 -setuptools/command/alias.py,sha256=1sLQxZcNh6dDQpDmm4G7UGGTol83nY1NTPmNBbm2siI,2381 -setuptools/command/bdist_egg.py,sha256=-upiB6fFtm8cQSQj1LRDVpG1-T143DsXCvV0fh03u7U,16604 -setuptools/command/bdist_rpm.py,sha256=PxrgoHPNaw2Pw2qNjjHDPC-Ay_IaDbCqP3d_5N-cj2A,1182 -setuptools/command/build_clib.py,sha256=fWHSFGkk10VCddBWCszvNhowbG9Z9CZXVjQ2uSInoOs,4415 -setuptools/command/build_ext.py,sha256=SNK042HfB2ezlDQbSVRGFqI1IM5A4AsjU1wpV3fgskE,13212 -setuptools/command/build_py.py,sha256=c90V1nVPEtYkdye-xvo-B48V5RLvSgD8JBMfPtUbtYw,8751 -setuptools/command/develop.py,sha256=5_Ss7ENd1_B_jVMY1tF5UV_y1Xu6jbVzAPG8oKeluGA,7012 -setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960 -setuptools/command/easy_install.py,sha256=GP7c8I2Vbyr35LxYg9q7RSH8AYrQJhCZ1BG2FQAebsA,86141 -setuptools/command/egg_info.py,sha256=0Y3BXOaQ5tftvCet6LA4lD4A_K9igTKfl-wUJmvs84A,26126 -setuptools/command/install.py,sha256=UynjFBgRyyHrDZRVAmXrXG0vChJAMx-sxnOO3JoAzVo,4906 -setuptools/command/install_egg_info.py,sha256=bMgeIeRiXzQ4DAGPV1328kcjwQjHjOWU4FngAWLV78Q,2203 -setuptools/command/install_lib.py,sha256=Uz42McsyHZAjrB6cw9E7Bz0xsaTbzxnM1PI9CBhiPtE,3875 -setuptools/command/install_scripts.py,sha256=o0jN_ex7yYYk8W5clymTFOXwkFMKzW9q_zd9Npcex7M,2593 -setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628 -setuptools/command/py36compat.py,sha256=7yLWzQj179Enx3pJ8V1cDDCzeLMFMd9XJXlK-iZTq5Y,4946 -setuptools/command/register.py,sha256=kk3DxXCb5lXTvqnhfwx2g6q7iwbUmgTyXUCaBooBOUk,468 -setuptools/command/rotate.py,sha256=SvsQPasezIojPjvMnfkqzh8P0U0tCj0daczF8uc3NQM,2128 -setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658 -setuptools/command/sdist.py,sha256=2onJidYBPFpUgcX6J4KjZX5ilwciHPRB8VkID5YVaL0,6413 
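The RECORD file being deleted here is the installed-files manifest defined by PEP 376 and the wheel spec: one line per file in the form path,sha256=<digest>,<size>, where the digest is the unpadded urlsafe-base64 SHA-256 of the file's bytes (RECORD itself is listed with empty hash and size fields, as in the "RECORD,," line above). A short sketch of how one such line is produced; the file path is hypothetical.

import base64
import hashlib

def record_line(path):
    """Build a RECORD entry: path,sha256=<urlsafe-b64 digest>,<size>."""
    with open(path, "rb") as fh:
        data = fh.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    digest = digest.rstrip(b"=").decode("ascii")  # spec strips '=' padding
    return f"{path},sha256={digest},{len(data)}"

# Hypothetical target; a real RECORD lists every installed file.
print(record_line("setuptools/version.py"))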
-setuptools/command/setopt.py,sha256=okxhqD1NM1nQlbSVDCNv6P7Y7g680sc2r-tUW7wPH1Y,5086 -setuptools/command/test.py,sha256=qGY-Hx1RPCndlVh2rsrEs5479CgmxRsrEflVLr98jVA,8088 -setuptools/command/upload.py,sha256=XT3YFVfYPAmA5qhGg0euluU98ftxRUW-PzKcODMLxUs,462 -setuptools/command/upload_docs.py,sha256=ba5kOyedD_u62weinrxqqnvpuQvBIuamXehJG6tAvO0,7218 -setuptools/config.py,sha256=O-T_28163qkEeaX8bLgqJLuOLYur15cC2_xpA0RENfM,23153 -setuptools/dep_util.py,sha256=BDx1BkzNQntvAB4alypHbW5UVBzjqths000PrUL4Zqc,949 -setuptools/depends.py,sha256=QYQIadr5DwLxPzkErhNt5hmRhvGhWxoXZMRXCm_jcQ0,5499 -setuptools/dist.py,sha256=96Hyp6sNqM9yj02sjZUQyhkk6xsljYiB2oLiSWkrLMU,43407 -setuptools/errors.py,sha256=t4Rm85eXm71Ti0-PO1gAQMRK3V7NN3x1tcbcw0-xGSI,1555 -setuptools/extension.py,sha256=wOWVz6qk-B3qx-O3vNp2gUZ2ItDEoB4MXIDNU_HCdiU,1675 -setuptools/extern/__init__.py,sha256=Hhf9W73WAitw9TdRJfDIb6YFjmK56CF61afds1Mg0HY,2407 -setuptools/glob.py,sha256=1oZjbfjAHSXbgdhSuR6YGU8jKob9L8NtEmBYqcPTLYk,4873 -setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 -setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264 -setuptools/gui-arm64.exe,sha256=TEFnOKDi-mq3ZszxqbCoCXTnM_lhUWjdIqBpr6fVs40,137728 -setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 -setuptools/installer.py,sha256=s6DQfsoICBJxbUqbduhOJtl1oG0S4yegRCg3EAs0i3M,3824 -setuptools/launch.py,sha256=TyPT-Ic1T2EnYvGO26gfNRP4ysBlrhpbRjQxWsiO414,812 -setuptools/logging.py,sha256=npS_AaQd7a-4CVqxC8CKKkwQqLNvSH__pPO94BOB8TY,1148 -setuptools/monkey.py,sha256=0e3HdVKXHL415O7np-AUqhEFXPPuDdJKbI47chQ_DE4,5217 -setuptools/msvc.py,sha256=3LLt938e6OR7wWPzIvCQu7LCWZSIKqoKV6w3r8jV3kY,50561 -setuptools/namespaces.py,sha256=PMqGVPXPYQgjUTvEg9bGccRAkIODrQ6NmsDg_fwErwI,3093 -setuptools/package_index.py,sha256=iajBWGn6Kh6ti4xaiy5fmvG6_SlqLxJ1jV-RK9wnCHs,40055 -setuptools/py34compat.py,sha256=KYOd6ybRxjBW8NJmYD8t_UyyVmysppFXqHpFLdslGXU,245 -setuptools/sandbox.py,sha256=mR83i-mu-ZUU_7TaMgYCeRSyzkqv8loJ_GR9xhS2DDw,14348 -setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218 -setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138 -setuptools/unicode_utils.py,sha256=aOOFo4JGwAsiBttGYDsqFS7YqWQeZ2j6DWiCuctR_00,941 -setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144 -setuptools/wheel.py,sha256=4yoNrbyYshwcbLyV5hKksVJNnIXWV2pde_4sUYaxAcY,8274 -setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/entry_points.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/entry_points.txt deleted file mode 100644 index 9466bf6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/entry_points.txt +++ /dev/null @@ -1,56 +0,0 @@ -[distutils.commands] -alias = setuptools.command.alias:alias -bdist_egg = setuptools.command.bdist_egg:bdist_egg -bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm -build_clib = setuptools.command.build_clib:build_clib -build_ext = setuptools.command.build_ext:build_ext -build_py = setuptools.command.build_py:build_py -develop = setuptools.command.develop:develop -dist_info = setuptools.command.dist_info:dist_info -easy_install = setuptools.command.easy_install:easy_install -egg_info = setuptools.command.egg_info:egg_info -install = setuptools.command.install:install -install_egg_info = 
setuptools.command.install_egg_info:install_egg_info -install_lib = setuptools.command.install_lib:install_lib -install_scripts = setuptools.command.install_scripts:install_scripts -rotate = setuptools.command.rotate:rotate -saveopts = setuptools.command.saveopts:saveopts -sdist = setuptools.command.sdist:sdist -setopt = setuptools.command.setopt:setopt -test = setuptools.command.test:test -upload_docs = setuptools.command.upload_docs:upload_docs - -[distutils.setup_keywords] -dependency_links = setuptools.dist:assert_string_list -eager_resources = setuptools.dist:assert_string_list -entry_points = setuptools.dist:check_entry_points -exclude_package_data = setuptools.dist:check_package_data -extras_require = setuptools.dist:check_extras -include_package_data = setuptools.dist:assert_bool -install_requires = setuptools.dist:check_requirements -namespace_packages = setuptools.dist:check_nsp -package_data = setuptools.dist:check_package_data -packages = setuptools.dist:check_packages -python_requires = setuptools.dist:check_specifier -setup_requires = setuptools.dist:check_requirements -test_loader = setuptools.dist:check_importable -test_runner = setuptools.dist:check_importable -test_suite = setuptools.dist:check_test_suite -tests_require = setuptools.dist:check_requirements -use_2to3 = setuptools.dist:invalid_unless_false -zip_safe = setuptools.dist:assert_bool - -[egg_info.writers] -PKG-INFO = setuptools.command.egg_info:write_pkg_info -dependency_links.txt = setuptools.command.egg_info:overwrite_arg -depends.txt = setuptools.command.egg_info:warn_depends_obsolete -eager_resources.txt = setuptools.command.egg_info:overwrite_arg -entry_points.txt = setuptools.command.egg_info:write_entries -namespace_packages.txt = setuptools.command.egg_info:overwrite_arg -requires.txt = setuptools.command.egg_info:write_requirements -top_level.txt = setuptools.command.egg_info:write_toplevel_names - -[setuptools.finalize_distribution_options] -keywords = setuptools.dist:Distribution._finalize_setup_keywords -parent_finalize = setuptools.dist:_Distribution.finalize_options - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/top_level.txt deleted file mode 100644 index b5ac107..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/top_level.txt +++ /dev/null @@ -1,3 +0,0 @@ -_distutils_hack -pkg_resources -setuptools diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/__init__.py deleted file mode 100755 index 06991b6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/__init__.py +++ /dev/null @@ -1,244 +0,0 @@ -"""Extensions to the 'distutils' for large or complex distributions""" - -from fnmatch import fnmatchcase -import functools -import os -import re - -import _distutils_hack.override # noqa: F401 - -import distutils.core -from distutils.errors import DistutilsOptionError -from distutils.util import convert_path - -from ._deprecation_warning import SetuptoolsDeprecationWarning - -import setuptools.version -from setuptools.extension import Extension -from setuptools.dist import Distribution -from setuptools.depends import Require -from . import monkey -from . 
import logging - - -__all__ = [ - 'setup', - 'Distribution', - 'Command', - 'Extension', - 'Require', - 'SetuptoolsDeprecationWarning', - 'find_packages', - 'find_namespace_packages', -] - -__version__ = setuptools.version.__version__ - -bootstrap_install_from = None - - -class PackageFinder: - """ - Generate a list of all Python packages found within a directory - """ - - @classmethod - def find(cls, where='.', exclude=(), include=('*',)): - """Return a list all Python packages found within directory 'where' - - 'where' is the root directory which will be searched for packages. It - should be supplied as a "cross-platform" (i.e. URL-style) path; it will - be converted to the appropriate local path syntax. - - 'exclude' is a sequence of package names to exclude; '*' can be used - as a wildcard in the names, such that 'foo.*' will exclude all - subpackages of 'foo' (but not 'foo' itself). - - 'include' is a sequence of package names to include. If it's - specified, only the named packages will be included. If it's not - specified, all found packages will be included. 'include' can contain - shell style wildcard patterns just like 'exclude'. - """ - - return list( - cls._find_packages_iter( - convert_path(where), - cls._build_filter('ez_setup', '*__pycache__', *exclude), - cls._build_filter(*include), - ) - ) - - @classmethod - def _find_packages_iter(cls, where, exclude, include): - """ - All the packages found in 'where' that pass the 'include' filter, but - not the 'exclude' filter. - """ - for root, dirs, files in os.walk(where, followlinks=True): - # Copy dirs to iterate over it, then empty dirs. - all_dirs = dirs[:] - dirs[:] = [] - - for dir in all_dirs: - full_path = os.path.join(root, dir) - rel_path = os.path.relpath(full_path, where) - package = rel_path.replace(os.path.sep, '.') - - # Skip directory trees that are not valid packages - if '.' in dir or not cls._looks_like_package(full_path): - continue - - # Should this package be included? - if include(package) and not exclude(package): - yield package - - # Keep searching subdirectories, as there may be more packages - # down there, even if the parent was excluded. - dirs.append(dir) - - @staticmethod - def _looks_like_package(path): - """Does a directory look like a package?""" - return os.path.isfile(os.path.join(path, '__init__.py')) - - @staticmethod - def _build_filter(*patterns): - """ - Given a list of patterns, return a callable that will be true only if - the input matches at least one of the patterns. - """ - return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) - - -class PEP420PackageFinder(PackageFinder): - @staticmethod - def _looks_like_package(path): - return True - - -find_packages = PackageFinder.find -find_namespace_packages = PEP420PackageFinder.find - - -def _install_setup_requires(attrs): - # Note: do not use `setuptools.Distribution` directly, as - # our PEP 517 backend patch `distutils.core.Distribution`. - class MinimalDistribution(distutils.core.Distribution): - """ - A minimal version of a distribution for supporting the - fetch_build_eggs interface. - """ - - def __init__(self, attrs): - _incl = 'dependency_links', 'setup_requires' - filtered = {k: attrs[k] for k in set(_incl) & set(attrs)} - super().__init__(filtered) - - def finalize_options(self): - """ - Disable finalize_options to avoid building the working set. - Ref #2158. - """ - - dist = MinimalDistribution(attrs) - - # Honor setup.cfg's options. 
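PackageFinder above pairs os.walk(followlinks=True) with shell-style include/exclude filters, and re-appends every visited directory so that an excluded parent pattern such as 'foo.*' does not hide deeper packages that should still be found. A condensed sketch of the _build_filter idea; the package names and patterns are illustrative.

from fnmatch import fnmatchcase

def build_filter(*patterns):
    # True if the name matches at least one shell-style pattern.
    return lambda name: any(fnmatchcase(name, pat) for pat in patterns)

include = build_filter("*")  # default: everything
exclude = build_filter("ez_setup", "*__pycache__", "tests.*")

candidates = ["pkg", "pkg.sub", "tests", "tests.unit", "a__pycache__"]
found = [p for p in candidates if include(p) and not exclude(p)]
# 'tests.*' excludes subpackages of tests but not 'tests' itself.
print(found)  # ['pkg', 'pkg.sub', 'tests']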
- dist.parse_config_files(ignore_option_errors=True) - if dist.setup_requires: - dist.fetch_build_eggs(dist.setup_requires) - - -def setup(**attrs): - # Make sure we have any requirements needed to interpret 'attrs'. - logging.configure() - _install_setup_requires(attrs) - return distutils.core.setup(**attrs) - - -setup.__doc__ = distutils.core.setup.__doc__ - - -_Command = monkey.get_unpatched(distutils.core.Command) - - -class Command(_Command): - __doc__ = _Command.__doc__ - - command_consumes_arguments = False - - def __init__(self, dist, **kw): - """ - Construct the command for dist, updating - vars(self) with any keyword parameters. - """ - super().__init__(dist) - vars(self).update(kw) - - def _ensure_stringlike(self, option, what, default=None): - val = getattr(self, option) - if val is None: - setattr(self, option, default) - return default - elif not isinstance(val, str): - raise DistutilsOptionError( - "'%s' must be a %s (got `%s`)" % (option, what, val) - ) - return val - - def ensure_string_list(self, option): - r"""Ensure that 'option' is a list of strings. If 'option' is - currently a string, we split it either on /,\s*/ or /\s+/, so - "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become - ["foo", "bar", "baz"]. - """ - val = getattr(self, option) - if val is None: - return - elif isinstance(val, str): - setattr(self, option, re.split(r',\s*|\s+', val)) - else: - if isinstance(val, list): - ok = all(isinstance(v, str) for v in val) - else: - ok = False - if not ok: - raise DistutilsOptionError( - "'%s' must be a list of strings (got %r)" % (option, val) - ) - - def reinitialize_command(self, command, reinit_subcommands=0, **kw): - cmd = _Command.reinitialize_command(self, command, reinit_subcommands) - vars(cmd).update(kw) - return cmd - - -def _find_all_simple(path): - """ - Find all files under 'path' - """ - results = ( - os.path.join(base, file) - for base, dirs, files in os.walk(path, followlinks=True) - for file in files - ) - return filter(os.path.isfile, results) - - -def findall(dir=os.curdir): - """ - Find all files under 'dir' and return the list of full filenames. - Unless dir is '.', return full filenames with dir prepended. - """ - files = _find_all_simple(dir) - if dir == os.curdir: - make_rel = functools.partial(os.path.relpath, start=dir) - files = map(make_rel, files) - return list(files) - - -class sic(str): - """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)""" - - -# Apply monkey patches -monkey.patch_all() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_deprecation_warning.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_deprecation_warning.py deleted file mode 100755 index 086b64d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_deprecation_warning.py +++ /dev/null @@ -1,7 +0,0 @@ -class SetuptoolsDeprecationWarning(Warning): - """ - Base class for warning deprecations in ``setuptools`` - - This class is not derived from ``DeprecationWarning``, and as such is - visible by default. 
- """ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/__init__.py deleted file mode 100755 index 8fd493b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -"""distutils - -The main package for the Python Module Distribution Utilities. Normally -used from a setup script as - - from distutils.core import setup - - setup (...) -""" - -import sys -import importlib - -__version__ = sys.version[:sys.version.index(' ')] - - -try: - # Allow Debian and pkgsrc (only) to customize system - # behavior. Ref pypa/distutils#2 and pypa/distutils#16. - # This hook is deprecated and no other environments - # should use it. - importlib.import_module('_distutils_system_mod') -except ImportError: - pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_collections.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_collections.py deleted file mode 100755 index 98fce80..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_collections.py +++ /dev/null @@ -1,56 +0,0 @@ -import collections -import itertools - - -# from jaraco.collections 3.5.1 -class DictStack(list, collections.abc.Mapping): - """ - A stack of dictionaries that behaves as a view on those dictionaries, - giving preference to the last. - - >>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)]) - >>> stack['a'] - 2 - >>> stack['b'] - 2 - >>> stack['c'] - 2 - >>> len(stack) - 3 - >>> stack.push(dict(a=3)) - >>> stack['a'] - 3 - >>> set(stack.keys()) == set(['a', 'b', 'c']) - True - >>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)]) - True - >>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2) - True - >>> d = stack.pop() - >>> stack['a'] - 2 - >>> d = stack.pop() - >>> stack['a'] - 1 - >>> stack.get('b', None) - >>> 'c' in stack - True - """ - - def __iter__(self): - dicts = list.__iter__(self) - return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts))) - - def __getitem__(self, key): - for scope in reversed(tuple(list.__iter__(self))): - if key in scope: - return scope[key] - raise KeyError(key) - - push = list.append - - def __contains__(self, other): - return collections.abc.Mapping.__contains__(self, other) - - def __len__(self): - return len(list(iter(self))) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_msvccompiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_msvccompiler.py deleted file mode 100755 index f2f801c..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/_msvccompiler.py +++ /dev/null @@ -1,561 +0,0 @@ -"""distutils._msvccompiler - -Contains MSVCCompiler, an implementation of the abstract CCompiler class -for Microsoft Visual Studio 2015. - -The module is compatible with VS 2015 and later. You can find legacy support -for older versions in distutils.msvc9compiler and distutils.msvccompiler. 
-""" - -# Written by Perry Stoll -# hacked by Robin Becker and Thomas Heller to do a better job of -# finding DevStudio (through the registry) -# ported to VS 2005 and VS 2008 by Christian Heimes -# ported to VS 2015 by Steve Dower - -import os -import subprocess -import contextlib -import warnings -import unittest.mock -with contextlib.suppress(ImportError): - import winreg - -from distutils.errors import DistutilsExecError, DistutilsPlatformError, \ - CompileError, LibError, LinkError -from distutils.ccompiler import CCompiler, gen_lib_options -from distutils import log -from distutils.util import get_platform - -from itertools import count - -def _find_vc2015(): - try: - key = winreg.OpenKeyEx( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY - ) - except OSError: - log.debug("Visual C++ is not registered") - return None, None - - best_version = 0 - best_dir = None - with key: - for i in count(): - try: - v, vc_dir, vt = winreg.EnumValue(key, i) - except OSError: - break - if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir): - try: - version = int(float(v)) - except (ValueError, TypeError): - continue - if version >= 14 and version > best_version: - best_version, best_dir = version, vc_dir - return best_version, best_dir - -def _find_vc2017(): - """Returns "15, path" based on the result of invoking vswhere.exe - If no install is found, returns "None, None" - - The version is returned to avoid unnecessarily changing the function - result. It may be ignored when the path is not None. - - If vswhere.exe is not available, by definition, VS 2017 is not - installed. - """ - root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") - if not root: - return None, None - - try: - path = subprocess.check_output([ - os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), - "-latest", - "-prerelease", - "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "-property", "installationPath", - "-products", "*", - ], encoding="mbcs", errors="strict").strip() - except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): - return None, None - - path = os.path.join(path, "VC", "Auxiliary", "Build") - if os.path.isdir(path): - return 15, path - - return None, None - -PLAT_SPEC_TO_RUNTIME = { - 'x86' : 'x86', - 'x86_amd64' : 'x64', - 'x86_arm' : 'arm', - 'x86_arm64' : 'arm64' -} - -def _find_vcvarsall(plat_spec): - # bpo-38597: Removed vcruntime return value - _, best_dir = _find_vc2017() - - if not best_dir: - best_version, best_dir = _find_vc2015() - - if not best_dir: - log.debug("No suitable Visual C++ version found") - return None, None - - vcvarsall = os.path.join(best_dir, "vcvarsall.bat") - if not os.path.isfile(vcvarsall): - log.debug("%s cannot be found", vcvarsall) - return None, None - - return vcvarsall, None - -def _get_vc_env(plat_spec): - if os.getenv("DISTUTILS_USE_SDK"): - return { - key.lower(): value - for key, value in os.environ.items() - } - - vcvarsall, _ = _find_vcvarsall(plat_spec) - if not vcvarsall: - raise DistutilsPlatformError("Unable to find vcvarsall.bat") - - try: - out = subprocess.check_output( - 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), - stderr=subprocess.STDOUT, - ).decode('utf-16le', errors='replace') - except subprocess.CalledProcessError as exc: - log.error(exc.output) - raise DistutilsPlatformError("Error executing {}" - .format(exc.cmd)) - - env = { - key.lower(): value - for key, _, value in - 
(line.partition('=') for line in out.splitlines()) - if key and value - } - - return env - -def _find_exe(exe, paths=None): - """Return path to an MSVC executable program. - - Tries to find the program in several places: first, one of the - MSVC program search paths from the registry; next, the directories - in the PATH environment variable. If any of those work, return an - absolute path that is known to exist. If none of them work, just - return the original program name, 'exe'. - """ - if not paths: - paths = os.getenv('path').split(os.pathsep) - for p in paths: - fn = os.path.join(os.path.abspath(p), exe) - if os.path.isfile(fn): - return fn - return exe - -# A map keyed by get_platform() return values to values accepted by -# 'vcvarsall.bat'. Always cross-compile from x86 to work with the -# lighter-weight MSVC installs that do not include native 64-bit tools. -PLAT_TO_VCVARS = { - 'win32' : 'x86', - 'win-amd64' : 'x86_amd64', - 'win-arm32' : 'x86_arm', - 'win-arm64' : 'x86_arm64' -} - -class MSVCCompiler(CCompiler) : - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = (_c_extensions + _cpp_extensions + - _rc_extensions + _mc_extensions) - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - - def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) - # target platform (.plat_name is consistent with 'bdist') - self.plat_name = None - self.initialized = False - - def initialize(self, plat_name=None): - # multi-init means we would need to check platform same each time... - assert not self.initialized, "don't init multiple times" - if plat_name is None: - plat_name = get_platform() - # sanity check for platforms to prevent obscure errors later. - if plat_name not in PLAT_TO_VCVARS: - raise DistutilsPlatformError("--plat-name must be one of {}" - .format(tuple(PLAT_TO_VCVARS))) - - # Get the vcvarsall.bat spec for the requested platform. 
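_get_vc_env above uses a classic bootstrap trick: run vcvarsall.bat and the cmd built-in 'set' in a single invocation, then parse the resulting KEY=VALUE dump into a dict. The sketch below restates that capture step on its own; cmd /u makes the output UTF-16LE, which is why that decoding is used, and the script path in the usage note is hypothetical.

import subprocess

def env_after(script, *args):
    """Run a batch script, then dump and parse the resulting environment."""
    out = subprocess.check_output(
        'cmd /u /c "{}" {} && set'.format(script, " ".join(args)),
        stderr=subprocess.STDOUT,
    ).decode("utf-16le", errors="replace")
    return {
        key.lower(): value
        for key, _, value in (line.partition("=") for line in out.splitlines())
        if key and value
    }

# Hypothetical usage on a Windows box with Visual Studio installed:
# env = env_after(r"C:\...\VC\Auxiliary\Build\vcvarsall.bat", "x86_amd64")
# print(env["path"].split(";")[0])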
- plat_spec = PLAT_TO_VCVARS[plat_name] - - vc_env = _get_vc_env(plat_spec) - if not vc_env: - raise DistutilsPlatformError("Unable to find a compatible " - "Visual Studio installation.") - - self._paths = vc_env.get('path', '') - paths = self._paths.split(os.pathsep) - self.cc = _find_exe("cl.exe", paths) - self.linker = _find_exe("link.exe", paths) - self.lib = _find_exe("lib.exe", paths) - self.rc = _find_exe("rc.exe", paths) # resource compiler - self.mc = _find_exe("mc.exe", paths) # message compiler - self.mt = _find_exe("mt.exe", paths) # message compiler - - for dir in vc_env.get('include', '').split(os.pathsep): - if dir: - self.add_include_dir(dir.rstrip(os.sep)) - - for dir in vc_env.get('lib', '').split(os.pathsep): - if dir: - self.add_library_dir(dir.rstrip(os.sep)) - - self.preprocess_options = None - # bpo-38597: Always compile with dynamic linking - # Future releases of Python 3.x will include all past - # versions of vcruntime*.dll for compatibility. - self.compile_options = [ - '/nologo', '/O2', '/W3', '/GL', '/DNDEBUG', '/MD' - ] - - self.compile_options_debug = [ - '/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG' - ] - - ldflags = [ - '/nologo', '/INCREMENTAL:NO', '/LTCG' - ] - - ldflags_debug = [ - '/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL' - ] - - self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1'] - self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1'] - self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO'] - self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO'] - self.ldflags_static = [*ldflags] - self.ldflags_static_debug = [*ldflags_debug] - - self._ldflags = { - (CCompiler.EXECUTABLE, None): self.ldflags_exe, - (CCompiler.EXECUTABLE, False): self.ldflags_exe, - (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug, - (CCompiler.SHARED_OBJECT, None): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, False): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug, - (CCompiler.SHARED_LIBRARY, None): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, False): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug, - } - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - def object_filenames(self, - source_filenames, - strip_dir=0, - output_dir=''): - ext_map = { - **{ext: self.obj_extension for ext in self.src_extensions}, - **{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions}, - } - - output_dir = output_dir or '' - - def make_out_path(p): - base, ext = os.path.splitext(p) - if strip_dir: - base = os.path.basename(base) - else: - _, base = os.path.splitdrive(base) - if base.startswith((os.path.sep, os.path.altsep)): - base = base[1:] - try: - # XXX: This may produce absurdly long paths. We should check - # the length of the result and trim base until we fit within - # 260 characters. 
- return os.path.join(output_dir, base + ext_map[ext]) - except LookupError: - # Better to raise an exception instead of silently continuing - # and later complain about sources and targets having - # different lengths - raise CompileError("Don't know how to compile {}".format(p)) - - return list(map(make_out_path, source_filenames)) - - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile(output_dir, macros, include_dirs, - sources, depends, extra_postargs) - macros, objects, extra_postargs, pp_opts, build = compile_info - - compile_opts = extra_preargs or [] - compile_opts.append('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - - add_cpp_opts = False - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - add_cpp_opts = True - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + [output_opt, input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. - h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src]) - base, _ = os.path.splitext(os.path.basename (src)) - rc_file = os.path.join(rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc, "/fo" + obj, rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError("Don't know how to compile {} to {}" - .format(src, obj)) - - args = [self.cc] + compile_opts + pp_opts - if add_cpp_opts: - args.append('/EHsc') - args.append(input_opt) - args.append("/Fo" + obj) - args.extend(extra_postargs) - - try: - self.spawn(args) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - - def create_static_lib(self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, - output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? 
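The compile() loop above is a dispatch on source extension: C sources get /Tc, C++ sources get /Tp (and turn on /EHsc), .rc files go straight to the resource compiler, and .mc files take the two-step mc to .rc to .res route. A tiny sketch of just the C/C++ branch of that dispatch, with illustrative file names; the real method also spawns cl.exe and handles the resource pipelines.

import os

C_EXTS = {".c"}
CPP_EXTS = {".cc", ".cpp", ".cxx"}

def input_option(src):
    ext = os.path.splitext(src)[1]
    if ext in C_EXTS:
        return "/Tc" + src  # compile as C
    if ext in CPP_EXTS:
        return "/Tp" + src  # compile as C++ (the caller adds /EHsc)
    raise ValueError(f"Don't know how to compile {src}")

print(input_option("util.c"))    # /Tcutil.c
print(input_option("core.cpp"))  # /Tpcore.cpp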
- try: - log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args)) - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - libraries, library_dirs, runtime_library_dirs = fixed_args - - if runtime_library_dirs: - self.warn("I don't know what to do with 'runtime_library_dirs': " - + str(runtime_library_dirs)) - - lib_opts = gen_lib_options(self, - library_dirs, runtime_library_dirs, - libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - ldflags = self._ldflags[target_desc, debug] - - export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])] - - ld_args = (ldflags + lib_opts + export_opts + - objects + ['/OUT:' + output_filename]) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. - build_temp = os.path.dirname(objects[0]) - if export_symbols is not None: - (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename)) - implib_file = os.path.join( - build_temp, - self.library_filename(dll_name)) - ld_args.append ('/IMPLIB:' + implib_file) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - output_dir = os.path.dirname(os.path.abspath(output_filename)) - self.mkpath(output_dir) - try: - log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args)) - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def spawn(self, cmd): - env = dict(os.environ, PATH=self._paths) - with self._fallback_spawn(cmd, env) as fallback: - return super().spawn(cmd, env=env) - return fallback.value - - @contextlib.contextmanager - def _fallback_spawn(self, cmd, env): - """ - Discovered in pypa/distutils#15, some tools monkeypatch the compiler, - so the 'env' kwarg causes a TypeError. Detect this condition and - restore the legacy, unsafe behavior. - """ - bag = type('Bag', (), {})() - try: - yield bag - except TypeError as exc: - if "unexpected keyword argument 'env'" not in str(exc): - raise - else: - return - warnings.warn( - "Fallback spawn triggered. Please update distutils monkeypatch.") - with unittest.mock.patch.dict('os.environ', env): - bag.value = super().spawn(cmd) - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. 
- - def library_dir_option(self, dir): - return "/LIBPATH:" + dir - - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC") - - def library_option(self, lib): - return self.library_filename(lib) - - def find_library_file(self, dirs, lib, debug=0): - # Prefer a debugging library if found (and requested), but deal - # with it if we don't have one. - if debug: - try_names = [lib + "_d", lib] - else: - try_names = [lib] - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) - if os.path.isfile(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/archive_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/archive_util.py deleted file mode 100755 index 565a311..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/archive_util.py +++ /dev/null @@ -1,256 +0,0 @@ -"""distutils.archive_util - -Utility functions for creating archive files (tarballs, zip files, -that sort of thing).""" - -import os -from warnings import warn -import sys - -try: - import zipfile -except ImportError: - zipfile = None - - -from distutils.errors import DistutilsExecError -from distutils.spawn import spawn -from distutils.dir_util import mkpath -from distutils import log - -try: - from pwd import getpwnam -except ImportError: - getpwnam = None - -try: - from grp import getgrnam -except ImportError: - getgrnam = None - -def _get_gid(name): - """Returns a gid, given a group name.""" - if getgrnam is None or name is None: - return None - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if getpwnam is None or name is None: - return None - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", "xz", "compress", or - None. ("compress" will be deprecated in Python 3.2) - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_dir' + ".tar", possibly plus - the appropriate compression extension (".gz", ".bz2", ".xz" or ".Z"). - - Returns the output filename. 
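make_tarball applies 'owner' and 'group' by handing tarfile.add() a filter callable that rewrites each TarInfo before it is written, so no root privileges are needed to stamp the metadata. A minimal sketch of that filter pattern; the archive and directory paths are hypothetical.

import tarfile

def make_owned_tarball(archive, base_dir, uid=None, gid=None,
                       owner=None, group=None):
    def set_ids(tarinfo):
        # Rewrite ownership metadata on every member as it is added.
        if gid is not None:
            tarinfo.gid, tarinfo.gname = gid, group
        if uid is not None:
            tarinfo.uid, tarinfo.uname = uid, owner
        return tarinfo

    with tarfile.open(archive, "w|gz") as tar:
        tar.add(base_dir, filter=set_ids)
    return archive

# Hypothetical usage:
# make_owned_tarball("dist/pkg.tar.gz", "build/pkg", uid=0, gid=0,
#                    owner="root", group="root")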
- """ - tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '', - 'compress': ''} - compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz', - 'compress': '.Z'} - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext.keys(): - raise ValueError( - "bad value for 'compress': must be None, 'gzip', 'bzip2', " - "'xz' or 'compress'") - - archive_name = base_name + '.tar' - if compress != 'compress': - archive_name += compress_ext.get(compress, '') - - mkpath(os.path.dirname(archive_name), dry_run=dry_run) - - # creating the tarball - import tarfile # late import so Python build itself doesn't break - - log.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) - try: - tar.add(base_dir, filter=_set_uid_gid) - finally: - tar.close() - - # compression using `compress` - if compress == 'compress': - warn("'compress' will be deprecated.", PendingDeprecationWarning) - # the option varies depending on the platform - compressed_name = archive_name + compress_ext[compress] - if sys.platform == 'win32': - cmd = [compress, archive_name, compressed_name] - else: - cmd = [compress, '-f', archive_name] - spawn(cmd, dry_run=dry_run) - return compressed_name - - return archive_name - -def make_zipfile(base_name, base_dir, verbose=0, dry_run=0): - """Create a zip file from all the files under 'base_dir'. - - The output zip file will be named 'base_name' + ".zip". Uses either the - "zipfile" Python module (if available) or the InfoZIP "zip" utility - (if installed and found on the default search path). If neither tool is - available, raises DistutilsExecError. Returns the name of the output zip - file. - """ - zip_filename = base_name + ".zip" - mkpath(os.path.dirname(zip_filename), dry_run=dry_run) - - # If zipfile module is not available, try spawning an external - # 'zip' command. - if zipfile is None: - if verbose: - zipoptions = "-r" - else: - zipoptions = "-rq" - - try: - spawn(["zip", zipoptions, zip_filename, base_dir], - dry_run=dry_run) - except DistutilsExecError: - # XXX really should distinguish between "couldn't find - # external 'zip' command" and "zip failed". 
- raise DistutilsExecError(("unable to create zip file '%s': " - "could neither import the 'zipfile' module nor " - "find a standalone zip utility") % zip_filename) - - else: - log.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - try: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) - except RuntimeError: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_STORED) - - with zip: - if base_dir != os.curdir: - path = os.path.normpath(os.path.join(base_dir, '')) - zip.write(path, path) - log.info("adding '%s'", path) - for dirpath, dirnames, filenames in os.walk(base_dir): - for name in dirnames: - path = os.path.normpath(os.path.join(dirpath, name, '')) - zip.write(path, path) - log.info("adding '%s'", path) - for name in filenames: - path = os.path.normpath(os.path.join(dirpath, name)) - if os.path.isfile(path): - zip.write(path, path) - log.info("adding '%s'", path) - - return zip_filename - -ARCHIVE_FORMATS = { - 'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), - 'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), - 'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"), - 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"), - 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"), - 'zip': (make_zipfile, [],"ZIP file") - } - -def check_archive_formats(formats): - """Returns the first format from the 'format' list that is unknown. - - If all formats are known, returns None - """ - for format in formats: - if format not in ARCHIVE_FORMATS: - return format - return None - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "gztar", - "bztar", "xztar", or "ztar". - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. - - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. 
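make_archive is the front door over ARCHIVE_FORMATS: it optionally chdirs into root_dir, looks up the (function, extra_args, description) triple for the requested format, and forwards owner/group only for tar-based formats. Its call shape matches shutil.make_archive; a hypothetical invocation follows.

from distutils.archive_util import make_archive  # deprecated with distutils

# Archives build/pkg/** under pkg/ entries; tar members owned by root.
name = make_archive("dist/pkg", "gztar", root_dir="build", base_dir="pkg",
                    owner="root", group="root")
print(name)  # absolute path ending in dist/pkg.tar.gz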
- """ - save_cwd = os.getcwd() - if root_dir is not None: - log.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - if base_dir is None: - base_dir = os.curdir - - kwargs = {'dry_run': dry_run} - - try: - format_info = ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if format != 'zip': - kwargs['owner'] = owner - kwargs['group'] = group - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if root_dir is not None: - log.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/bcppcompiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/bcppcompiler.py deleted file mode 100755 index 2eb6d2e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/bcppcompiler.py +++ /dev/null @@ -1,393 +0,0 @@ -"""distutils.bcppcompiler - -Contains BorlandCCompiler, an implementation of the abstract CCompiler class -for the Borland C++ compiler. -""" - -# This implementation by Lyle Johnson, based on the original msvccompiler.py -# module and using the directions originally published by Gordon Williams. - -# XXX looks like there's a LOT of overlap between these two classes: -# someone should sit down and factor out the common code as -# WindowsCCompiler! --GPW - - -import os -from distutils.errors import \ - DistutilsExecError, \ - CompileError, LibError, LinkError, UnknownFileError -from distutils.ccompiler import \ - CCompiler, gen_preprocess_options -from distutils.file_util import write_file -from distutils.dep_util import newer -from distutils import log - -class BCPPCompiler(CCompiler) : - """Concrete class that implements an interface to the Borland C/C++ - compiler, as defined by the CCompiler abstract class. - """ - - compiler_type = 'bcpp' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = _c_extensions + _cpp_extensions - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - super().__init__(verbose, dry_run, force) - - # These executables are assumed to all be in the path. - # Borland doesn't seem to use any special registry settings to - # indicate their installation locations. 
- - self.cc = "bcc32.exe" - self.linker = "ilink32.exe" - self.lib = "tlib.exe" - - self.preprocess_options = None - self.compile_options = ['/tWM', '/O2', '/q', '/g0'] - self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0'] - - self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x'] - self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x'] - self.ldflags_static = [] - self.ldflags_exe = ['/Gn', '/q', '/x'] - self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r'] - - - # -- Worker methods ------------------------------------------------ - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - compile_opts = extra_preargs or [] - compile_opts.append ('-c') - if debug: - compile_opts.extend (self.compile_options_debug) - else: - compile_opts.extend (self.compile_options) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - # XXX why do the normpath here? - src = os.path.normpath(src) - obj = os.path.normpath(obj) - # XXX _setup_compile() did a mkpath() too but before the normpath. - # Is it possible to skip the normpath? - self.mkpath(os.path.dirname(obj)) - - if ext == '.res': - # This is already a binary file -- skip it. - continue # the 'for' loop - if ext == '.rc': - # This needs to be compiled to a .res file -- do it now. - try: - self.spawn (["brcc32", "-fo", obj, src]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue # the 'for' loop - - # The next two are both for the real compiler. - if ext in self._c_extensions: - input_opt = "" - elif ext in self._cpp_extensions: - input_opt = "-P" - else: - # Unknown file type -- no extra options. The compiler - # will probably fail, but let it just in case this is a - # file the compiler recognizes even if we don't. - input_opt = "" - - output_opt = "-o" + obj - - # Compiler command line syntax is: "bcc32 [options] file(s)". - # Note that the source file names must appear at the end of - # the command line. - try: - self.spawn ([self.cc] + compile_opts + pp_opts + - [input_opt, output_opt] + - extra_postargs + [src]) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - # compile () - - - def create_static_lib (self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - (objects, output_dir) = self._fix_object_args (objects, output_dir) - output_filename = \ - self.library_filename (output_libname, output_dir=output_dir) - - if self._need_link (objects, output_filename): - lib_args = [output_filename, '/u'] + objects - if debug: - pass # XXX what goes here? - try: - self.spawn ([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # create_static_lib () - - - def link (self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - # XXX this ignores 'build_temp'! 
should follow the lead of - # msvccompiler.py - - (objects, output_dir) = self._fix_object_args (objects, output_dir) - (libraries, library_dirs, runtime_library_dirs) = \ - self._fix_lib_args (libraries, library_dirs, runtime_library_dirs) - - if runtime_library_dirs: - log.warn("I don't know what to do with 'runtime_library_dirs': %s", - str(runtime_library_dirs)) - - if output_dir is not None: - output_filename = os.path.join (output_dir, output_filename) - - if self._need_link (objects, output_filename): - - # Figure out linker args based on type of target. - if target_desc == CCompiler.EXECUTABLE: - startup_obj = 'c0w32' - if debug: - ld_args = self.ldflags_exe_debug[:] - else: - ld_args = self.ldflags_exe[:] - else: - startup_obj = 'c0d32' - if debug: - ld_args = self.ldflags_shared_debug[:] - else: - ld_args = self.ldflags_shared[:] - - - # Create a temporary exports file for use by the linker - if export_symbols is None: - def_file = '' - else: - head, tail = os.path.split (output_filename) - modname, ext = os.path.splitext (tail) - temp_dir = os.path.dirname(objects[0]) # preserve tree structure - def_file = os.path.join (temp_dir, '%s.def' % modname) - contents = ['EXPORTS'] - for sym in (export_symbols or []): - contents.append(' %s=_%s' % (sym, sym)) - self.execute(write_file, (def_file, contents), - "writing %s" % def_file) - - # Borland C++ has problems with '/' in paths - objects2 = map(os.path.normpath, objects) - # split objects in .obj and .res files - # Borland C++ needs them at different positions in the command line - objects = [startup_obj] - resources = [] - for file in objects2: - (base, ext) = os.path.splitext(os.path.normcase(file)) - if ext == '.res': - resources.append(file) - else: - objects.append(file) - - - for l in library_dirs: - ld_args.append("/L%s" % os.path.normpath(l)) - ld_args.append("/L.") # we sometimes use relative paths - - # list of object files - ld_args.extend(objects) - - # XXX the command-line syntax for Borland C++ is a bit wonky; - # certain filenames are jammed together in one big string, but - # comma-delimited. This doesn't mesh too well with the - # Unix-centric attitude (with a DOS/Windows quoting hack) of - # 'spawn()', so constructing the argument list is a bit - # awkward. Note that doing the obvious thing and jamming all - # the filenames and commas into one argument would be wrong, - # because 'spawn()' would quote any filenames with spaces in - # them. Arghghh!. Apparently it works fine as coded... 
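            # To make that layout concrete, a hypothetical DLL link assembled
            # below comes out shaped roughly like:
            #
            #   ilink32 /Tpd /Gn /q /x /Lsome/dir /L. c0d32 foo.obj , foo.dll ,, import32 cw32mt , foo.def , foo.res
            #
            # i.e. startup object and objects, ",<output>", ",," (no map
            # file), default libraries, ",<def file>", ",<resources>".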
- - # name of dll/exe file - ld_args.extend([',',output_filename]) - # no map file and start libraries - ld_args.append(',,') - - for lib in libraries: - # see if we find it and if there is a bcpp specific lib - # (xxx_bcpp.lib) - libfile = self.find_library_file(library_dirs, lib, debug) - if libfile is None: - ld_args.append(lib) - # probably a BCPP internal library -- don't warn - else: - # full name which prefers bcpp_xxx.lib over xxx.lib - ld_args.append(libfile) - - # some default libraries - ld_args.append ('import32') - ld_args.append ('cw32mt') - - # def file for export symbols - ld_args.extend([',',def_file]) - # add resource files - ld_args.append(',') - ld_args.extend(resources) - - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - self.mkpath (os.path.dirname (output_filename)) - try: - self.spawn ([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # link () - - # -- Miscellaneous methods ----------------------------------------- - - - def find_library_file (self, dirs, lib, debug=0): - # List of effective library names to try, in order of preference: - # xxx_bcpp.lib is better than xxx.lib - # and xxx_d.lib is better than xxx.lib if debug is set - # - # The "_bcpp" suffix is to handle a Python installation for people - # with multiple compilers (primarily Distutils hackers, I suspect - # ;-). The idea is they'd have one static library for each - # compiler they care about, since (almost?) every Windows compiler - # seems to have a different format for static libraries. - if debug: - dlib = (lib + "_d") - try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib) - else: - try_names = (lib + "_bcpp", lib) - - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) - if os.path.exists(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None - - # overwrite the one from CCompiler to support rc and res-files - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - if ext not in (self.src_extensions + ['.rc','.res']): - raise UnknownFileError("unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res': - # these can go unchanged - obj_names.append (os.path.join (output_dir, base + ext)) - elif ext == '.rc': - # these need to be compiled to .res-files - obj_names.append (os.path.join (output_dir, base + '.res')) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - def preprocess (self, - source, - output_file=None, - macros=None, - include_dirs=None, - extra_preargs=None, - extra_postargs=None): - - (_, macros, include_dirs) = \ - self._fix_compile_args(None, macros, include_dirs) - pp_opts = gen_preprocess_options(macros, include_dirs) - pp_args = ['cpp32.exe'] + pp_opts - if output_file is not None: - pp_args.append('-o' + output_file) - if extra_preargs: - pp_args[:0] = extra_preargs - if extra_postargs: - pp_args.extend(extra_postargs) - pp_args.append(source) - - # We need to preprocess: either we're being forced to, or the - # source file is newer 
than the target (or the target doesn't - # exist). - if self.force or output_file is None or newer(source, output_file): - if output_file: - self.mkpath(os.path.dirname(output_file)) - try: - self.spawn(pp_args) - except DistutilsExecError as msg: - print(msg) - raise CompileError(msg) - - # preprocess() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/ccompiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/ccompiler.py deleted file mode 100755 index 777fc66..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/ccompiler.py +++ /dev/null @@ -1,1123 +0,0 @@ -"""distutils.ccompiler - -Contains CCompiler, an abstract base class that defines the interface -for the Distutils compiler abstraction model.""" - -import sys, os, re -from distutils.errors import * -from distutils.spawn import spawn -from distutils.file_util import move_file -from distutils.dir_util import mkpath -from distutils.dep_util import newer_group -from distutils.util import split_quoted, execute -from distutils import log - -class CCompiler: - """Abstract base class to define the interface that must be implemented - by real compiler classes. Also has some utility methods used by - several compiler classes. - - The basic idea behind a compiler abstraction class is that each - instance can be used for all the compile/link steps in building a - single project. Thus, attributes common to all of those compile and - link steps -- include directories, macros to define, libraries to link - against, etc. -- are attributes of the compiler instance. To allow for - variability in how individual files are treated, most of those - attributes may be varied on a per-compilation or per-link basis. - """ - - # 'compiler_type' is a class attribute that identifies this class. It - # keeps code that wants to know what kind of compiler it's dealing with - # from having to import all possible compiler classes just to do an - # 'isinstance'. In concrete CCompiler subclasses, 'compiler_type' - # should really, really be one of the keys of the 'compiler_class' - # dictionary (see below -- used by the 'new_compiler()' factory - # function) -- authors of new compiler interface classes are - # responsible for updating 'compiler_class'! - compiler_type = None - - # XXX things not handled by this compiler abstraction model: - # * client can't provide additional options for a compiler, - # e.g. warning, optimization, debugging flags. Perhaps this - # should be the domain of concrete compiler abstraction classes - # (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base - # class should have methods for the common ones. - # * can't completely override the include or library searchg - # path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2". - # I'm not sure how widely supported this is even by Unix - # compilers, much less on other platforms. And I'm even less - # sure how useful it is; maybe for cross-compiling, but - # support for that is a ways off. (And anyways, cross - # compilers probably have a dedicated binary with the - # right paths compiled in. I hope.) - # * can't do really freaky things with the library list/library - # dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against - # different versions of libfoo.a in different locations. I - # think this is useless without the ability to null out the - # library search path anyways. 
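    # Note that the per-call 'extra_preargs'/'extra_postargs' parameters of
    # compile() and link(), documented below, are the escape hatch for the
    # first limitation listed above.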
- - - # Subclasses that rely on the standard filename generation methods - # implemented below should override these; see the comment near - # those methods ('object_filenames()' et. al.) for details: - src_extensions = None # list of strings - obj_extension = None # string - static_lib_extension = None - shared_lib_extension = None # string - static_lib_format = None # format string - shared_lib_format = None # prob. same as static_lib_format - exe_extension = None # string - - # Default language settings. language_map is used to detect a source - # file or Extension target language, checking source filenames. - # language_order is used to detect the language precedence, when deciding - # what language to use when mixing source types. For example, if some - # extension has two files with ".c" extension, and one with ".cpp", it - # is still linked as c++. - language_map = {".c" : "c", - ".cc" : "c++", - ".cpp" : "c++", - ".cxx" : "c++", - ".m" : "objc", - } - language_order = ["c++", "objc", "c"] - - def __init__(self, verbose=0, dry_run=0, force=0): - self.dry_run = dry_run - self.force = force - self.verbose = verbose - - # 'output_dir': a common output directory for object, library, - # shared object, and shared library files - self.output_dir = None - - # 'macros': a list of macro definitions (or undefinitions). A - # macro definition is a 2-tuple (name, value), where the value is - # either a string or None (no explicit value). A macro - # undefinition is a 1-tuple (name,). - self.macros = [] - - # 'include_dirs': a list of directories to search for include files - self.include_dirs = [] - - # 'libraries': a list of libraries to include in any link - # (library names, not filenames: eg. "foo" not "libfoo.a") - self.libraries = [] - - # 'library_dirs': a list of directories to search for libraries - self.library_dirs = [] - - # 'runtime_library_dirs': a list of directories to search for - # shared libraries/objects at runtime - self.runtime_library_dirs = [] - - # 'objects': a list of object files (or similar, such as explicitly - # named library files) to include on any link - self.objects = [] - - for key in self.executables.keys(): - self.set_executable(key, self.executables[key]) - - def set_executables(self, **kwargs): - """Define the executables (and options for them) that will be run - to perform the various stages of compilation. The exact set of - executables that may be specified here depends on the compiler - class (via the 'executables' class attribute), but most will have: - compiler the C/C++ compiler - linker_so linker used to create shared objects and libraries - linker_exe linker used to create binary executables - archiver static library creator - - On platforms with a command-line (Unix, DOS/Windows), each of these - is a string that will be split into executable name and (optional) - list of arguments. (Splitting the string is done similarly to how - Unix shells operate: words are delimited by spaces, but quotes and - backslashes can override this. See - 'distutils.util.split_quoted()'.) - """ - - # Note that some CCompiler implementation classes will define class - # attributes 'cpp', 'cc', etc. with hard-coded executable names; - # this is appropriate when a compiler class is for exactly one - # compiler/OS combination (eg. MSVCCompiler). Other compiler - # classes (UnixCCompiler, in particular) are driven by information - # discovered at run-time, since there are many different ways to do - # basically the same things with Unix C compilers. 
- - for key in kwargs: - if key not in self.executables: - raise ValueError("unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - self.set_executable(key, kwargs[key]) - - def set_executable(self, key, value): - if isinstance(value, str): - setattr(self, key, split_quoted(value)) - else: - setattr(self, key, value) - - def _find_macro(self, name): - i = 0 - for defn in self.macros: - if defn[0] == name: - return i - i += 1 - return None - - def _check_macro_definitions(self, definitions): - """Ensures that every element of 'definitions' is a valid macro - definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do - nothing if all definitions are OK, raise TypeError otherwise. - """ - for defn in definitions: - if not (isinstance(defn, tuple) and - (len(defn) in (1, 2) and - (isinstance (defn[1], str) or defn[1] is None)) and - isinstance (defn[0], str)): - raise TypeError(("invalid macro definition '%s': " % defn) + \ - "must be tuple (string,), (string, string), or " + \ - "(string, None)") - - - # -- Bookkeeping methods ------------------------------------------- - - def define_macro(self, name, value=None): - """Define a preprocessor macro for all compilations driven by this - compiler object. The optional parameter 'value' should be a - string; if it is not supplied, then the macro will be defined - without an explicit value and the exact outcome depends on the - compiler used (XXX true? does ANSI say anything about this?) - """ - # Delete from the list of macro definitions/undefinitions if - # already there (so that this one will take precedence). - i = self._find_macro (name) - if i is not None: - del self.macros[i] - - self.macros.append((name, value)) - - def undefine_macro(self, name): - """Undefine a preprocessor macro for all compilations driven by - this compiler object. If the same macro is defined by - 'define_macro()' and undefined by 'undefine_macro()' the last call - takes precedence (including multiple redefinitions or - undefinitions). If the macro is redefined/undefined on a - per-compilation basis (ie. in the call to 'compile()'), then that - takes precedence. - """ - # Delete from the list of macro definitions/undefinitions if - # already there (so that this one will take precedence). - i = self._find_macro (name) - if i is not None: - del self.macros[i] - - undefn = (name,) - self.macros.append(undefn) - - def add_include_dir(self, dir): - """Add 'dir' to the list of directories that will be searched for - header files. The compiler is instructed to search directories in - the order in which they are supplied by successive calls to - 'add_include_dir()'. - """ - self.include_dirs.append(dir) - - def set_include_dirs(self, dirs): - """Set the list of directories that will be searched to 'dirs' (a - list of strings). Overrides any preceding calls to - 'add_include_dir()'; subsequence calls to 'add_include_dir()' add - to the list passed to 'set_include_dirs()'. This does not affect - any list of standard include directories that the compiler may - search by default. - """ - self.include_dirs = dirs[:] - - def add_library(self, libname): - """Add 'libname' to the list of libraries that will be included in - all links driven by this compiler object. Note that 'libname' - should *not* be the name of a file containing a library, but the - name of the library itself: the actual filename will be inferred by - the linker, the compiler, or the compiler class (depending on the - platform). 
- - The linker will be instructed to link against libraries in the - order they were supplied to 'add_library()' and/or - 'set_libraries()'. It is perfectly valid to duplicate library - names; the linker will be instructed to link against libraries as - many times as they are mentioned. - """ - self.libraries.append(libname) - - def set_libraries(self, libnames): - """Set the list of libraries to be included in all links driven by - this compiler object to 'libnames' (a list of strings). This does - not affect any standard system libraries that the linker may - include by default. - """ - self.libraries = libnames[:] - - def add_library_dir(self, dir): - """Add 'dir' to the list of directories that will be searched for - libraries specified to 'add_library()' and 'set_libraries()'. The - linker will be instructed to search for libraries in the order they - are supplied to 'add_library_dir()' and/or 'set_library_dirs()'. - """ - self.library_dirs.append(dir) - - def set_library_dirs(self, dirs): - """Set the list of library search directories to 'dirs' (a list of - strings). This does not affect any standard library search path - that the linker may search by default. - """ - self.library_dirs = dirs[:] - - def add_runtime_library_dir(self, dir): - """Add 'dir' to the list of directories that will be searched for - shared libraries at runtime. - """ - self.runtime_library_dirs.append(dir) - - def set_runtime_library_dirs(self, dirs): - """Set the list of directories to search for shared libraries at - runtime to 'dirs' (a list of strings). This does not affect any - standard search path that the runtime linker may search by - default. - """ - self.runtime_library_dirs = dirs[:] - - def add_link_object(self, object): - """Add 'object' to the list of object files (or analogues, such as - explicitly named library files or the output of "resource - compilers") to be included in every link driven by this compiler - object. - """ - self.objects.append(object) - - def set_link_objects(self, objects): - """Set the list of object files (or analogues) to be included in - every link to 'objects'. This does not affect any standard object - files that the linker may include by default (such as system - libraries). 
- """ - self.objects = objects[:] - - - # -- Private utility methods -------------------------------------- - # (here for the convenience of subclasses) - - # Helper method to prep compiler in subclass compile() methods - - def _setup_compile(self, outdir, macros, incdirs, sources, depends, - extra): - """Process arguments and decide which source files to compile.""" - if outdir is None: - outdir = self.output_dir - elif not isinstance(outdir, str): - raise TypeError("'output_dir' must be a string or None") - - if macros is None: - macros = self.macros - elif isinstance(macros, list): - macros = macros + (self.macros or []) - else: - raise TypeError("'macros' (if supplied) must be a list of tuples") - - if incdirs is None: - incdirs = self.include_dirs - elif isinstance(incdirs, (list, tuple)): - incdirs = list(incdirs) + (self.include_dirs or []) - else: - raise TypeError( - "'include_dirs' (if supplied) must be a list of strings") - - if extra is None: - extra = [] - - # Get the list of expected output (object) files - objects = self.object_filenames(sources, strip_dir=0, - output_dir=outdir) - assert len(objects) == len(sources) - - pp_opts = gen_preprocess_options(macros, incdirs) - - build = {} - for i in range(len(sources)): - src = sources[i] - obj = objects[i] - ext = os.path.splitext(src)[1] - self.mkpath(os.path.dirname(obj)) - build[obj] = (src, ext) - - return macros, objects, extra, pp_opts, build - - def _get_cc_args(self, pp_opts, debug, before): - # works for unixccompiler, cygwinccompiler - cc_args = pp_opts + ['-c'] - if debug: - cc_args[:0] = ['-g'] - if before: - cc_args[:0] = before - return cc_args - - def _fix_compile_args(self, output_dir, macros, include_dirs): - """Typecheck and fix-up some of the arguments to the 'compile()' - method, and return fixed-up values. Specifically: if 'output_dir' - is None, replaces it with 'self.output_dir'; ensures that 'macros' - is a list, and augments it with 'self.macros'; ensures that - 'include_dirs' is a list, and augments it with 'self.include_dirs'. - Guarantees that the returned values are of the correct type, - i.e. for 'output_dir' either string or None, and for 'macros' and - 'include_dirs' either list or None. - """ - if output_dir is None: - output_dir = self.output_dir - elif not isinstance(output_dir, str): - raise TypeError("'output_dir' must be a string or None") - - if macros is None: - macros = self.macros - elif isinstance(macros, list): - macros = macros + (self.macros or []) - else: - raise TypeError("'macros' (if supplied) must be a list of tuples") - - if include_dirs is None: - include_dirs = self.include_dirs - elif isinstance(include_dirs, (list, tuple)): - include_dirs = list(include_dirs) + (self.include_dirs or []) - else: - raise TypeError( - "'include_dirs' (if supplied) must be a list of strings") - - return output_dir, macros, include_dirs - - def _prep_compile(self, sources, output_dir, depends=None): - """Decide which source files must be recompiled. - - Determine the list of object files corresponding to 'sources', - and figure out which ones really need to be recompiled. - Return a list of all object files and a dictionary telling - which source files can be skipped. - """ - # Get the list of expected output (object) files - objects = self.object_filenames(sources, output_dir=output_dir) - assert len(objects) == len(sources) - - # Return an empty dict for the "which source files can be skipped" - # return value to preserve API compatibility. 
- return objects, {} - - def _fix_object_args(self, objects, output_dir): - """Typecheck and fix up some arguments supplied to various methods. - Specifically: ensure that 'objects' is a list; if output_dir is - None, replace with self.output_dir. Return fixed versions of - 'objects' and 'output_dir'. - """ - if not isinstance(objects, (list, tuple)): - raise TypeError("'objects' must be a list or tuple of strings") - objects = list(objects) - - if output_dir is None: - output_dir = self.output_dir - elif not isinstance(output_dir, str): - raise TypeError("'output_dir' must be a string or None") - - return (objects, output_dir) - - def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs): - """Typecheck and fix up some of the arguments supplied to the - 'link_*' methods. Specifically: ensure that all arguments are - lists, and augment them with their permanent versions - (eg. 'self.libraries' augments 'libraries'). Return a tuple with - fixed versions of all arguments. - """ - if libraries is None: - libraries = self.libraries - elif isinstance(libraries, (list, tuple)): - libraries = list (libraries) + (self.libraries or []) - else: - raise TypeError( - "'libraries' (if supplied) must be a list of strings") - - if library_dirs is None: - library_dirs = self.library_dirs - elif isinstance(library_dirs, (list, tuple)): - library_dirs = list (library_dirs) + (self.library_dirs or []) - else: - raise TypeError( - "'library_dirs' (if supplied) must be a list of strings") - - if runtime_library_dirs is None: - runtime_library_dirs = self.runtime_library_dirs - elif isinstance(runtime_library_dirs, (list, tuple)): - runtime_library_dirs = (list(runtime_library_dirs) + - (self.runtime_library_dirs or [])) - else: - raise TypeError("'runtime_library_dirs' (if supplied) " - "must be a list of strings") - - return (libraries, library_dirs, runtime_library_dirs) - - def _need_link(self, objects, output_file): - """Return true if we need to relink the files listed in 'objects' - to recreate 'output_file'. - """ - if self.force: - return True - else: - if self.dry_run: - newer = newer_group (objects, output_file, missing='newer') - else: - newer = newer_group (objects, output_file) - return newer - - def detect_language(self, sources): - """Detect the language of a given file, or list of files. Uses - language_map, and language_order to do the job. - """ - if not isinstance(sources, list): - sources = [sources] - lang = None - index = len(self.language_order) - for source in sources: - base, ext = os.path.splitext(source) - extlang = self.language_map.get(ext) - try: - extindex = self.language_order.index(extlang) - if extindex < index: - lang = extlang - index = extindex - except ValueError: - pass - return lang - - - # -- Worker methods ------------------------------------------------ - # (must be implemented by subclasses) - - def preprocess(self, source, output_file=None, macros=None, - include_dirs=None, extra_preargs=None, extra_postargs=None): - """Preprocess a single C/C++ source file, named in 'source'. - Output will be written to file named 'output_file', or stdout if - 'output_file' not supplied. 'macros' is a list of macro - definitions as for 'compile()', which will augment the macros set - with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a - list of directory names that will be added to the default list. - - Raises PreprocessError on failure. 
- """ - pass - - def compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """Compile one or more source files. - - 'sources' must be a list of filenames, most likely C/C++ - files, but in reality anything that can be handled by a - particular compiler and compiler class (eg. MSVCCompiler can - handle resource files in 'sources'). Return a list of object - filenames, one per source filename in 'sources'. Depending on - the implementation, not all source files will necessarily be - compiled, but all corresponding object filenames will be - returned. - - If 'output_dir' is given, object files will be put under it, while - retaining their original path component. That is, "foo/bar.c" - normally compiles to "foo/bar.o" (for a Unix implementation); if - 'output_dir' is "build", then it would compile to - "build/foo/bar.o". - - 'macros', if given, must be a list of macro definitions. A macro - definition is either a (name, value) 2-tuple or a (name,) 1-tuple. - The former defines a macro; if the value is None, the macro is - defined without an explicit value. The 1-tuple case undefines a - macro. Later definitions/redefinitions/ undefinitions take - precedence. - - 'include_dirs', if given, must be a list of strings, the - directories to add to the default include file search path for this - compilation only. - - 'debug' is a boolean; if true, the compiler will be instructed to - output debug symbols in (or alongside) the object file(s). - - 'extra_preargs' and 'extra_postargs' are implementation- dependent. - On platforms that have the notion of a command-line (e.g. Unix, - DOS/Windows), they are most likely lists of strings: extra - command-line arguments to prepend/append to the compiler command - line. On other platforms, consult the implementation class - documentation. In any event, they are intended as an escape hatch - for those occasions when the abstract compiler framework doesn't - cut the mustard. - - 'depends', if given, is a list of filenames that all targets - depend on. If a source file is older than any file in - depends, then the source file will be recompiled. This - supports dependency tracking, but only at a coarse - granularity. - - Raises CompileError on failure. - """ - # A concrete compiler class can either override this method - # entirely or implement _compile(). - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - - # Return *all* object filenames, not just the ones we just built. - return objects - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - # A concrete compiler class that does not override compile() - # should implement _compile(). - pass - - def create_static_lib(self, objects, output_libname, output_dir=None, - debug=0, target_lang=None): - """Link a bunch of stuff together to create a static library file. - The "bunch of stuff" consists of the list of object files supplied - as 'objects', the extra object files supplied to - 'add_link_object()' and/or 'set_link_objects()', the libraries - supplied to 'add_library()' and/or 'set_libraries()', and the - libraries supplied as 'libraries' (if any). 
- - 'output_libname' should be a library name, not a filename; the - filename will be inferred from the library name. 'output_dir' is - the directory where the library file will be put. - - 'debug' is a boolean; if true, debugging information will be - included in the library (note that on most platforms, it is the - compile step where this matters: the 'debug' flag is included here - just for consistency). - - 'target_lang' is the target language for which the given objects - are being compiled. This allows specific linkage time treatment of - certain languages. - - Raises LibError on failure. - """ - pass - - - # values for target_desc parameter in link() - SHARED_OBJECT = "shared_object" - SHARED_LIBRARY = "shared_library" - EXECUTABLE = "executable" - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - """Link a bunch of stuff together to create an executable or - shared library file. - - The "bunch of stuff" consists of the list of object files supplied - as 'objects'. 'output_filename' should be a filename. If - 'output_dir' is supplied, 'output_filename' is relative to it - (i.e. 'output_filename' can provide directory components if - needed). - - 'libraries' is a list of libraries to link against. These are - library names, not filenames, since they're translated into - filenames in a platform-specific way (eg. "foo" becomes "libfoo.a" - on Unix and "foo.lib" on DOS/Windows). However, they can include a - directory component, which means the linker will look in that - specific directory rather than searching all the normal locations. - - 'library_dirs', if supplied, should be a list of directories to - search for libraries that were specified as bare library names - (ie. no directory component). These are on top of the system - default and those supplied to 'add_library_dir()' and/or - 'set_library_dirs()'. 'runtime_library_dirs' is a list of - directories that will be embedded into the shared library and used - to search for other shared libraries that *it* depends on at - run-time. (This may only be relevant on Unix.) - - 'export_symbols' is a list of symbols that the shared library will - export. (This appears to be relevant only on Windows.) - - 'debug' is as for 'compile()' and 'create_static_lib()', with the - slight distinction that it actually matters on most platforms (as - opposed to 'create_static_lib()', which includes a 'debug' flag - mostly for form's sake). - - 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except - of course that they supply command-line arguments for the - particular linker being used). - - 'target_lang' is the target language for which the given objects - are being compiled. This allows specific linkage time treatment of - certain languages. - - Raises LinkError on failure. - """ - raise NotImplementedError - - - # Old 'link_*()' methods, rewritten to use the new 'link()' method. 
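As orientation for the wrappers that follow, here is a minimal end-to-end
sketch of driving a compiler instance; the source path is hypothetical, and
new_compiler() is the factory defined near the end of this module:

    from distutils.ccompiler import new_compiler

    cc = new_compiler()                # 'unix' or 'msvc', chosen per platform
    cc.define_macro('NDEBUG')
    cc.add_include_dir('include')
    objs = cc.compile(['src/foo.c'], output_dir='build')
    cc.link_shared_object(objs, 'build/foo.so', libraries=['m'])

This exercises compile(), the bookkeeping methods above, and one of the
link_*() convenience wrappers below.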
- - def link_shared_lib(self, - objects, - output_libname, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - self.link(CCompiler.SHARED_LIBRARY, objects, - self.library_filename(output_libname, lib_type='shared'), - output_dir, - libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, - extra_preargs, extra_postargs, build_temp, target_lang) - - - def link_shared_object(self, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - self.link(CCompiler.SHARED_OBJECT, objects, - output_filename, output_dir, - libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, - extra_preargs, extra_postargs, build_temp, target_lang) - - - def link_executable(self, - objects, - output_progname, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - target_lang=None): - self.link(CCompiler.EXECUTABLE, objects, - self.executable_filename(output_progname), output_dir, - libraries, library_dirs, runtime_library_dirs, None, - debug, extra_preargs, extra_postargs, None, target_lang) - - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function; there is - # no appropriate default implementation so subclasses should - # implement all of these. - - def library_dir_option(self, dir): - """Return the compiler option to add 'dir' to the list of - directories searched for libraries. - """ - raise NotImplementedError - - def runtime_library_dir_option(self, dir): - """Return the compiler option to add 'dir' to the list of - directories searched for runtime libraries. - """ - raise NotImplementedError - - def library_option(self, lib): - """Return the compiler option to add 'lib' to the list of libraries - linked into the shared library or executable. - """ - raise NotImplementedError - - def has_function(self, funcname, includes=None, include_dirs=None, - libraries=None, library_dirs=None): - """Return a boolean indicating whether funcname is supported on - the current platform. The optional arguments can be used to - augment the compilation environment. - """ - # this can't be included at module scope because it tries to - # import math which might not be available at that point - maybe - # the necessary logic should just be inlined? 
- import tempfile - if includes is None: - includes = [] - if include_dirs is None: - include_dirs = [] - if libraries is None: - libraries = [] - if library_dirs is None: - library_dirs = [] - fd, fname = tempfile.mkstemp(".c", funcname, text=True) - f = os.fdopen(fd, "w") - try: - for incl in includes: - f.write("""#include "%s"\n""" % incl) - f.write("""\ -int main (int argc, char **argv) { - %s(); - return 0; -} -""" % funcname) - finally: - f.close() - try: - objects = self.compile([fname], include_dirs=include_dirs) - except CompileError: - return False - finally: - os.remove(fname) - - try: - self.link_executable(objects, "a.out", - libraries=libraries, - library_dirs=library_dirs) - except (LinkError, TypeError): - return False - else: - os.remove(os.path.join(self.output_dir or '', "a.out")) - finally: - for fn in objects: - os.remove(fn) - return True - - def find_library_file (self, dirs, lib, debug=0): - """Search the specified list of directories for a static or shared - library file 'lib' and return the full path to that file. If - 'debug' true, look for a debugging version (if that makes sense on - the current platform). Return None if 'lib' wasn't found in any of - the specified directories. - """ - raise NotImplementedError - - # -- Filename generation methods ----------------------------------- - - # The default implementation of the filename generating methods are - # prejudiced towards the Unix/DOS/Windows view of the world: - # * object files are named by replacing the source file extension - # (eg. .c/.cpp -> .o/.obj) - # * library files (shared or static) are named by plugging the - # library name and extension into a format string, eg. - # "lib%s.%s" % (lib_name, ".a") for Unix static libraries - # * executables are named by appending an extension (possibly - # empty) to the program name: eg. progname + ".exe" for - # Windows - # - # To reduce redundant code, these methods expect to find - # several attributes in the current object (presumably defined - # as class attributes): - # * src_extensions - - # list of C/C++ source file extensions, eg. ['.c', '.cpp'] - # * obj_extension - - # object file extension, eg. '.o' or '.obj' - # * static_lib_extension - - # extension for static library files, eg. '.a' or '.lib' - # * shared_lib_extension - - # extension for shared library/object files, eg. '.so', '.dll' - # * static_lib_format - - # format string for generating static library filenames, - # eg. 'lib%s.%s' or '%s.%s' - # * shared_lib_format - # format string for generating shared library filenames - # (probably same as static_lib_format, since the extension - # is one of the intended parameters to the format string) - # * exe_extension - - # extension for executable files, eg. 
'' or '.exe' - - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if ext not in self.src_extensions: - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_names.append(os.path.join(output_dir, - base + self.obj_extension)) - return obj_names - - def shared_object_filename(self, basename, strip_dir=0, output_dir=''): - assert output_dir is not None - if strip_dir: - basename = os.path.basename(basename) - return os.path.join(output_dir, basename + self.shared_lib_extension) - - def executable_filename(self, basename, strip_dir=0, output_dir=''): - assert output_dir is not None - if strip_dir: - basename = os.path.basename(basename) - return os.path.join(output_dir, basename + (self.exe_extension or '')) - - def library_filename(self, libname, lib_type='static', # or 'shared' - strip_dir=0, output_dir=''): - assert output_dir is not None - if lib_type not in ("static", "shared", "dylib", "xcode_stub"): - raise ValueError( - "'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"") - fmt = getattr(self, lib_type + "_lib_format") - ext = getattr(self, lib_type + "_lib_extension") - - dir, base = os.path.split(libname) - filename = fmt % (base, ext) - if strip_dir: - dir = '' - - return os.path.join(output_dir, dir, filename) - - - # -- Utility methods ----------------------------------------------- - - def announce(self, msg, level=1): - log.debug(msg) - - def debug_print(self, msg): - from distutils.debug import DEBUG - if DEBUG: - print(msg) - - def warn(self, msg): - sys.stderr.write("warning: %s\n" % msg) - - def execute(self, func, args, msg=None, level=1): - execute(func, args, msg, self.dry_run) - - def spawn(self, cmd, **kwargs): - spawn(cmd, dry_run=self.dry_run, **kwargs) - - def move_file(self, src, dst): - return move_file(src, dst, dry_run=self.dry_run) - - def mkpath (self, name, mode=0o777): - mkpath(name, mode, dry_run=self.dry_run) - - -# Map a sys.platform/os.name ('posix', 'nt') to the default compiler -# type for that platform. Keys are interpreted as re match -# patterns. Order is important; platform mappings are preferred over -# OS names. -_default_compilers = ( - - # Platform string mappings - - # on a cygwin built python we can use gcc like an ordinary UNIXish - # compiler - ('cygwin.*', 'unix'), - - # OS name mappings - ('posix', 'unix'), - ('nt', 'msvc'), - - ) - -def get_default_compiler(osname=None, platform=None): - """Determine the default compiler to use for the given platform. - - osname should be one of the standard Python OS names (i.e. the - ones returned by os.name) and platform the common value - returned by sys.platform for the platform in question. - - The default values are os.name and sys.platform in case the - parameters are not given. - """ - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - for pattern, compiler in _default_compilers: - if re.match(pattern, platform) is not None or \ - re.match(pattern, osname) is not None: - return compiler - # Default to Unix compiler - return 'unix' - -# Map compiler types to (module_name, class_name) pairs -- ie. where to -# find the code that implements an interface to this compiler. 
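# For example, get_default_compiler('posix', 'linux') returns 'unix',
# get_default_compiler('nt', 'win32') returns 'msvc', and anything that
# matches no pattern falls back to 'unix'.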
(The module -# is assumed to be in the 'distutils' package.) -compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler', - "standard UNIX-style compiler"), - 'msvc': ('_msvccompiler', 'MSVCCompiler', - "Microsoft Visual C++"), - 'cygwin': ('cygwinccompiler', 'CygwinCCompiler', - "Cygwin port of GNU C Compiler for Win32"), - 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"), - 'bcpp': ('bcppcompiler', 'BCPPCompiler', - "Borland C++ Compiler"), - } - -def show_compilers(): - """Print list of available compilers (used by the "--help-compiler" - options to "build", "build_ext", "build_clib"). - """ - # XXX this "knows" that the compiler option it's describing is - # "--compiler", which just happens to be the case for the three - # commands that use it. - from distutils.fancy_getopt import FancyGetopt - compilers = [] - for compiler in compiler_class.keys(): - compilers.append(("compiler="+compiler, None, - compiler_class[compiler][2])) - compilers.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("List of available compilers:") - - -def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0): - """Generate an instance of some CCompiler subclass for the supplied - platform/compiler combination. 'plat' defaults to 'os.name' - (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler - for that platform. Currently only 'posix' and 'nt' are supported, and - the default compilers are "traditional Unix interface" (UnixCCompiler - class) and Visual C++ (MSVCCompiler class). Note that it's perfectly - possible to ask for a Unix compiler object under Windows, and a - Microsoft compiler object under Unix -- if you supply a value for - 'compiler', 'plat' is ignored. - """ - if plat is None: - plat = os.name - - try: - if compiler is None: - compiler = get_default_compiler(plat) - - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - - try: - module_name = "distutils." + module_name - __import__ (module_name) - module = sys.modules[module_name] - klass = vars(module)[class_name] - except ImportError: - raise DistutilsModuleError( - "can't compile C/C++ code: unable to load module '%s'" % \ - module_name) - except KeyError: - raise DistutilsModuleError( - "can't compile C/C++ code: unable to find class '%s' " - "in module '%s'" % (class_name, module_name)) - - # XXX The None is necessary to preserve backwards compatibility - # with classes that expect verbose to be the first positional - # argument. - return klass(None, dry_run, force) - - -def gen_preprocess_options(macros, include_dirs): - """Generate C pre-processor options (-D, -U, -I) as used by at least - two types of compilers: the typical Unix compiler and Visual C++. - 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,) - means undefine (-U) macro 'name', and (name,value) means define (-D) - macro 'name' to 'value'. 'include_dirs' is just a list of directory - names to be added to the header file search path (-I). Returns a list - of command-line options suitable for either Unix compilers or Visual - C++. - """ - # XXX it would be nice (mainly aesthetic, and so we don't generate - # stupid-looking command lines) to go over 'macros' and eliminate - # redundant definitions/undefinitions (ie. 
ensure that only the - # latest mention of a particular macro winds up on the command - # line). I don't think it's essential, though, since most (all?) - # Unix C compilers only pay attention to the latest -D or -U - # mention of a macro on their command line. Similar situation for - # 'include_dirs'. I'm punting on both for now. Anyways, weeding out - # redundancies like this should probably be the province of - # CCompiler, since the data structures used are inherited from it - # and therefore common to all CCompiler classes. - pp_opts = [] - for macro in macros: - if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2): - raise TypeError( - "bad macro definition '%s': " - "each element of 'macros' list must be a 1- or 2-tuple" - % macro) - - if len(macro) == 1: # undefine this macro - pp_opts.append("-U%s" % macro[0]) - elif len(macro) == 2: - if macro[1] is None: # define with no explicit value - pp_opts.append("-D%s" % macro[0]) - else: - # XXX *don't* need to be clever about quoting the - # macro value here, because we're going to avoid the - # shell at all costs when we spawn the command! - pp_opts.append("-D%s=%s" % macro) - - for dir in include_dirs: - pp_opts.append("-I%s" % dir) - return pp_opts - - -def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries): - """Generate linker options for searching library directories and - linking with specific libraries. 'libraries' and 'library_dirs' are, - respectively, lists of library names (not filenames!) and search - directories. Returns a list of command-line options suitable for use - with some compiler (depending on the two format strings passed in). - """ - lib_opts = [] - - for dir in library_dirs: - lib_opts.append(compiler.library_dir_option(dir)) - - for dir in runtime_library_dirs: - opt = compiler.runtime_library_dir_option(dir) - if isinstance(opt, list): - lib_opts = lib_opts + opt - else: - lib_opts.append(opt) - - # XXX it's important that we *not* remove redundant library mentions! - # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to - # resolve all symbols. I just hope we never have to say "-lfoo obj.o - # -lbar" to get things to work -- that's certainly a possibility, but a - # pretty nasty way to arrange your C code. - - for lib in libraries: - (lib_dir, lib_name) = os.path.split(lib) - if lib_dir: - lib_file = compiler.find_library_file([lib_dir], lib_name) - if lib_file: - lib_opts.append(lib_file) - else: - compiler.warn("no library file corresponding to " - "'%s' found (skipping)" % lib) - else: - lib_opts.append(compiler.library_option (lib)) - return lib_opts diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cmd.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cmd.py deleted file mode 100755 index dba3191..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cmd.py +++ /dev/null @@ -1,403 +0,0 @@ -"""distutils.cmd - -Provides the Command class, the base class for the command classes -in the distutils.command package. -""" - -import sys, os, re -from distutils.errors import DistutilsOptionError -from distutils import util, dir_util, file_util, archive_util, dep_util -from distutils import log - -class Command: - """Abstract base class for defining command classes, the "worker bees" - of the Distutils. A useful analogy for command classes is to think of - them as subroutines with local variables called "options". 
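Looping back to the helper that closed out ccompiler.py above: the flag
spelling produced by gen_preprocess_options() can be checked with a small,
self-contained snippet (assuming a Python whose distutils still ships this
module):

    from distutils.ccompiler import gen_preprocess_options

    opts = gen_preprocess_options(
        [('NDEBUG', None), ('VERSION', '"1.0"'), ('OLD_API',)],  # -D / -D= / -U
        ['include', 'vendor/include'])                           # -I
    assert opts == ['-DNDEBUG', '-DVERSION="1.0"', '-UOLD_API',
                    '-Iinclude', '-Ivendor/include']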
The options - are "declared" in 'initialize_options()' and "defined" (given their - final values, aka "finalized") in 'finalize_options()', both of which - must be defined by every command class. The distinction between the - two is necessary because option values might come from the outside - world (command line, config file, ...), and any options dependent on - other options must be computed *after* these outside influences have - been processed -- hence 'finalize_options()'. The "body" of the - subroutine, where it does all its work based on the values of its - options, is the 'run()' method, which must also be implemented by every - command class. - """ - - # 'sub_commands' formalizes the notion of a "family" of commands, - # eg. "install" as the parent with sub-commands "install_lib", - # "install_headers", etc. The parent of a family of commands - # defines 'sub_commands' as a class attribute; it's a list of - # (command_name : string, predicate : unbound_method | string | None) - # tuples, where 'predicate' is a method of the parent command that - # determines whether the corresponding command is applicable in the - # current situation. (Eg. we "install_headers" is only applicable if - # we have any C header files to install.) If 'predicate' is None, - # that command is always applicable. - # - # 'sub_commands' is usually defined at the *end* of a class, because - # predicates can be unbound methods, so they must already have been - # defined. The canonical example is the "install" command. - sub_commands = [] - - - # -- Creation/initialization methods ------------------------------- - - def __init__(self, dist): - """Create and initialize a new Command object. Most importantly, - invokes the 'initialize_options()' method, which is the real - initializer and depends on the actual command being - instantiated. - """ - # late import because of mutual dependence between these classes - from distutils.dist import Distribution - - if not isinstance(dist, Distribution): - raise TypeError("dist must be a Distribution instance") - if self.__class__ is Command: - raise RuntimeError("Command is an abstract class") - - self.distribution = dist - self.initialize_options() - - # Per-command versions of the global flags, so that the user can - # customize Distutils' behaviour command-by-command and let some - # commands fall back on the Distribution's behaviour. None means - # "not defined, check self.distribution's copy", while 0 or 1 mean - # false and true (duh). Note that this means figuring out the real - # value of each flag is a touch complicated -- hence "self._dry_run" - # will be handled by __getattr__, below. - # XXX This needs to be fixed. - self._dry_run = None - - # verbose is largely ignored, but needs to be set for - # backwards compatibility (I think)? - self.verbose = dist.verbose - - # Some commands define a 'self.force' option to ignore file - # timestamps, but methods defined *here* assume that - # 'self.force' exists for all commands. So define it here - # just to be safe. - self.force = None - - # The 'help' flag is just used for command-line parsing, so - # none of that complicated bureaucracy is needed. - self.help = 0 - - # 'finalized' records whether or not 'finalize_options()' has been - # called. 'finalize_options()' itself should not pay attention to - # this flag: it is the business of 'ensure_finalized()', which - # always calls 'finalize_options()', to respect/update it. - self.finalized = 0 - - # XXX A more explicit way to customize dry_run would be better. 
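    # The 'dry_run' attribute is deliberately never stored under its own
    # name, so every lookup for it falls through to __getattr__ below,
    # which consults '_dry_run' and then the Distribution's global flag.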
- def __getattr__(self, attr): - if attr == 'dry_run': - myval = getattr(self, "_" + attr) - if myval is None: - return getattr(self.distribution, attr) - else: - return myval - else: - raise AttributeError(attr) - - def ensure_finalized(self): - if not self.finalized: - self.finalize_options() - self.finalized = 1 - - # Subclasses must define: - # initialize_options() - # provide default values for all options; may be customized by - # setup script, by options from config file(s), or by command-line - # options - # finalize_options() - # decide on the final values for all options; this is called - # after all possible intervention from the outside world - # (command-line, option file, etc.) has been processed - # run() - # run the command: do whatever it is we're here to do, - # controlled by the command's various option values - - def initialize_options(self): - """Set default values for all the options that this command - supports. Note that these defaults may be overridden by other - commands, by the setup script, by config files, or by the - command-line. Thus, this is not the place to code dependencies - between options; generally, 'initialize_options()' implementations - are just a bunch of "self.foo = None" assignments. - - This method must be implemented by all command classes. - """ - raise RuntimeError("abstract method -- subclass %s must override" - % self.__class__) - - def finalize_options(self): - """Set final values for all the options that this command supports. - This is always called as late as possible, ie. after any option - assignments from the command-line or from other commands have been - done. Thus, this is the place to code option dependencies: if - 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as - long as 'foo' still has the same value it was assigned in - 'initialize_options()'. - - This method must be implemented by all command classes. - """ - raise RuntimeError("abstract method -- subclass %s must override" - % self.__class__) - - - def dump_options(self, header=None, indent=""): - from distutils.fancy_getopt import longopt_xlate - if header is None: - header = "command options for '%s':" % self.get_command_name() - self.announce(indent + header, level=log.INFO) - indent = indent + " " - for (option, _, _) in self.user_options: - option = option.translate(longopt_xlate) - if option[-1] == "=": - option = option[:-1] - value = getattr(self, option) - self.announce(indent + "%s = %s" % (option, value), - level=log.INFO) - - def run(self): - """A command's raison d'etre: carry out the action it exists to - perform, controlled by the options initialized in - 'initialize_options()', customized by other commands, the setup - script, the command-line, and config files, and finalized in - 'finalize_options()'. All terminal output and filesystem - interaction should be done by 'run()'. - - This method must be implemented by all command classes. - """ - raise RuntimeError("abstract method -- subclass %s must override" - % self.__class__) - - def announce(self, msg, level=1): - """If the current verbosity level is of greater than or equal to - 'level' print 'msg' to stdout. - """ - log.log(level, msg) - - def debug_print(self, msg): - """Print 'msg' to stdout if the global DEBUG (taken from the - DISTUTILS_DEBUG environment variable) flag is true. 
- """ - from distutils.debug import DEBUG - if DEBUG: - print(msg) - sys.stdout.flush() - - - # -- Option validation methods ------------------------------------- - # (these are very handy in writing the 'finalize_options()' method) - # - # NB. the general philosophy here is to ensure that a particular option - # value meets certain type and value constraints. If not, we try to - # force it into conformance (eg. if we expect a list but have a string, - # split the string on comma and/or whitespace). If we can't force the - # option into conformance, raise DistutilsOptionError. Thus, command - # classes need do nothing more than (eg.) - # self.ensure_string_list('foo') - # and they can be guaranteed that thereafter, self.foo will be - # a list of strings. - - def _ensure_stringlike(self, option, what, default=None): - val = getattr(self, option) - if val is None: - setattr(self, option, default) - return default - elif not isinstance(val, str): - raise DistutilsOptionError("'%s' must be a %s (got `%s`)" - % (option, what, val)) - return val - - def ensure_string(self, option, default=None): - """Ensure that 'option' is a string; if not defined, set it to - 'default'. - """ - self._ensure_stringlike(option, "string", default) - - def ensure_string_list(self, option): - r"""Ensure that 'option' is a list of strings. If 'option' is - currently a string, we split it either on /,\s*/ or /\s+/, so - "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become - ["foo", "bar", "baz"]. - """ - val = getattr(self, option) - if val is None: - return - elif isinstance(val, str): - setattr(self, option, re.split(r',\s*|\s+', val)) - else: - if isinstance(val, list): - ok = all(isinstance(v, str) for v in val) - else: - ok = False - if not ok: - raise DistutilsOptionError( - "'%s' must be a list of strings (got %r)" - % (option, val)) - - def _ensure_tested_string(self, option, tester, what, error_fmt, - default=None): - val = self._ensure_stringlike(option, what, default) - if val is not None and not tester(val): - raise DistutilsOptionError(("error in '%s' option: " + error_fmt) - % (option, val)) - - def ensure_filename(self, option): - """Ensure that 'option' is the name of an existing file.""" - self._ensure_tested_string(option, os.path.isfile, - "filename", - "'%s' does not exist or is not a file") - - def ensure_dirname(self, option): - self._ensure_tested_string(option, os.path.isdir, - "directory name", - "'%s' does not exist or is not a directory") - - - # -- Convenience methods for commands ------------------------------ - - def get_command_name(self): - if hasattr(self, 'command_name'): - return self.command_name - else: - return self.__class__.__name__ - - def set_undefined_options(self, src_cmd, *option_pairs): - """Set the values of any "undefined" options from corresponding - option values in some other command object. "Undefined" here means - "is None", which is the convention used to indicate that an option - has not been changed between 'initialize_options()' and - 'finalize_options()'. Usually called from 'finalize_options()' for - options that depend on some other command rather than another - option of the same command. 'src_cmd' is the other command from - which option values will be taken (a command object will be created - for it if necessary); the remaining arguments are - '(src_option,dst_option)' tuples which mean "take the value of - 'src_option' in the 'src_cmd' command object, and copy it to - 'dst_option' in the current command object". 
- """ - # Option_pairs: list of (src_option, dst_option) tuples - src_cmd_obj = self.distribution.get_command_obj(src_cmd) - src_cmd_obj.ensure_finalized() - for (src_option, dst_option) in option_pairs: - if getattr(self, dst_option) is None: - setattr(self, dst_option, getattr(src_cmd_obj, src_option)) - - def get_finalized_command(self, command, create=1): - """Wrapper around Distribution's 'get_command_obj()' method: find - (create if necessary and 'create' is true) the command object for - 'command', call its 'ensure_finalized()' method, and return the - finalized command object. - """ - cmd_obj = self.distribution.get_command_obj(command, create) - cmd_obj.ensure_finalized() - return cmd_obj - - # XXX rename to 'get_reinitialized_command()'? (should do the - # same in dist.py, if so) - def reinitialize_command(self, command, reinit_subcommands=0): - return self.distribution.reinitialize_command(command, - reinit_subcommands) - - def run_command(self, command): - """Run some other command: uses the 'run_command()' method of - Distribution, which creates and finalizes the command object if - necessary and then invokes its 'run()' method. - """ - self.distribution.run_command(command) - - def get_sub_commands(self): - """Determine the sub-commands that are relevant in the current - distribution (ie., that need to be run). This is based on the - 'sub_commands' class attribute: each tuple in that list may include - a method that we call to determine if the subcommand needs to be - run for the current distribution. Return a list of command names. - """ - commands = [] - for (cmd_name, method) in self.sub_commands: - if method is None or method(self): - commands.append(cmd_name) - return commands - - - # -- External world manipulation ----------------------------------- - - def warn(self, msg): - log.warn("warning: %s: %s\n", self.get_command_name(), msg) - - def execute(self, func, args, msg=None, level=1): - util.execute(func, args, msg, dry_run=self.dry_run) - - def mkpath(self, name, mode=0o777): - dir_util.mkpath(name, mode, dry_run=self.dry_run) - - def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1, - link=None, level=1): - """Copy a file respecting verbose, dry-run and force flags. (The - former two default to whatever is in the Distribution object, and - the latter defaults to false for commands that don't define it.)""" - return file_util.copy_file(infile, outfile, preserve_mode, - preserve_times, not self.force, link, - dry_run=self.dry_run) - - def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1, - preserve_symlinks=0, level=1): - """Copy an entire directory tree respecting verbose, dry-run, - and force flags. 
- """ - return dir_util.copy_tree(infile, outfile, preserve_mode, - preserve_times, preserve_symlinks, - not self.force, dry_run=self.dry_run) - - def move_file (self, src, dst, level=1): - """Move a file respecting dry-run flag.""" - return file_util.move_file(src, dst, dry_run=self.dry_run) - - def spawn(self, cmd, search_path=1, level=1): - """Spawn an external command respecting dry-run flag.""" - from distutils.spawn import spawn - spawn(cmd, search_path, dry_run=self.dry_run) - - def make_archive(self, base_name, format, root_dir=None, base_dir=None, - owner=None, group=None): - return archive_util.make_archive(base_name, format, root_dir, base_dir, - dry_run=self.dry_run, - owner=owner, group=group) - - def make_file(self, infiles, outfile, func, args, - exec_msg=None, skip_msg=None, level=1): - """Special case of 'execute()' for operations that process one or - more input files and generate one output file. Works just like - 'execute()', except the operation is skipped and a different - message printed if 'outfile' already exists and is newer than all - files listed in 'infiles'. If the command defined 'self.force', - and it is true, then the command is unconditionally run -- does no - timestamp checks. - """ - if skip_msg is None: - skip_msg = "skipping %s (inputs unchanged)" % outfile - - # Allow 'infiles' to be a single string - if isinstance(infiles, str): - infiles = (infiles,) - elif not isinstance(infiles, (list, tuple)): - raise TypeError( - "'infiles' must be a string, or a list or tuple of strings") - - if exec_msg is None: - exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles)) - - # If 'outfile' must be regenerated (either because it doesn't - # exist, is out-of-date, or the 'force' flag is true) then - # perform the action that presumably regenerates it - if self.force or dep_util.newer_group(infiles, outfile): - self.execute(func, args, exec_msg, level) - # Otherwise, print the "skip" message - else: - log.debug(skip_msg) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/__init__.py deleted file mode 100755 index 481eea9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands.""" - -__all__ = ['build', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'clean', - 'install', - 'install_lib', - 'install_headers', - 'install_scripts', - 'install_data', - 'sdist', - 'register', - 'bdist', - 'bdist_dumb', - 'bdist_rpm', - 'bdist_wininst', - 'check', - 'upload', - # These two are reserved for future use: - #'bdist_sdux', - #'bdist_pkgtool', - # Note: - # bdist_packager is not included because it only provides - # an abstract base class - ] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist.py deleted file mode 100755 index 014871d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist.py +++ /dev/null @@ -1,143 +0,0 @@ -"""distutils.command.bdist - -Implements the Distutils 'bdist' command (create a built [binary] -distribution).""" - -import os -from distutils.core 
import Command -from distutils.errors import * -from distutils.util import get_platform - - -def show_formats(): - """Print list of available formats (arguments to "--format" option). - """ - from distutils.fancy_getopt import FancyGetopt - formats = [] - for format in bdist.format_commands: - formats.append(("formats=" + format, None, - bdist.format_command[format][1])) - pretty_printer = FancyGetopt(formats) - pretty_printer.print_help("List of available distribution formats:") - - -class bdist(Command): - - description = "create a built (binary) distribution" - - user_options = [('bdist-base=', 'b', - "temporary directory for creating built distributions"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform()), - ('formats=', None, - "formats for distribution (comma-separated list)"), - ('dist-dir=', 'd', - "directory to put final built distributions in " - "[default: dist]"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ('owner=', 'u', - "Owner name used when creating a tar file" - " [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file" - " [default: current group]"), - ] - - boolean_options = ['skip-build'] - - help_options = [ - ('help-formats', None, - "lists available distribution formats", show_formats), - ] - - # The following commands do not take a format option from bdist - no_format_option = ('bdist_rpm',) - - # This won't do in reality: will need to distinguish RPM-ish Linux, - # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS. - default_format = {'posix': 'gztar', - 'nt': 'zip'} - - # Establish the preferred order (for the --help-formats option). - format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar', - 'wininst', 'zip', 'msi'] - - # And the real information. - format_command = {'rpm': ('bdist_rpm', "RPM distribution"), - 'gztar': ('bdist_dumb', "gzip'ed tar file"), - 'bztar': ('bdist_dumb', "bzip2'ed tar file"), - 'xztar': ('bdist_dumb', "xz'ed tar file"), - 'ztar': ('bdist_dumb', "compressed tar file"), - 'tar': ('bdist_dumb', "tar file"), - 'wininst': ('bdist_wininst', - "Windows executable installer"), - 'zip': ('bdist_dumb', "ZIP file"), - 'msi': ('bdist_msi', "Microsoft Installer") - } - - - def initialize_options(self): - self.bdist_base = None - self.plat_name = None - self.formats = None - self.dist_dir = None - self.skip_build = 0 - self.group = None - self.owner = None - - def finalize_options(self): - # have to finalize 'plat_name' before 'bdist_base' - if self.plat_name is None: - if self.skip_build: - self.plat_name = get_platform() - else: - self.plat_name = self.get_finalized_command('build').plat_name - - # 'bdist_base' -- parent of per-built-distribution-format - # temporary directories (eg. we'll probably have - # "build/bdist./dumb", "build/bdist./rpm", etc.) - if self.bdist_base is None: - build_base = self.get_finalized_command('build').build_base - self.bdist_base = os.path.join(build_base, - 'bdist.' + self.plat_name) - - self.ensure_string_list('formats') - if self.formats is None: - try: - self.formats = [self.default_format[os.name]] - except KeyError: - raise DistutilsPlatformError( - "don't know how to create built distributions " - "on platform %s" % os.name) - - if self.dist_dir is None: - self.dist_dir = "dist" - - def run(self): - # Figure out which sub-commands we need to run. 
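# In sketch form, the format -> sub-command resolution performed below
# (and by finalize_options above) looks like this; the tables are trimmed
# copies of 'format_command'/'default_format' and all names are invented:
import os

_FORMAT_COMMAND = {'rpm': 'bdist_rpm', 'gztar': 'bdist_dumb', 'zip': 'bdist_dumb'}
_DEFAULT_FORMAT = {'posix': 'gztar', 'nt': 'zip'}

def _commands_for(formats=None):
    """Map requested formats to the commands that build them."""
    if not formats:
        formats = [_DEFAULT_FORMAT[os.name]]    # KeyError: unsupported OS
    try:
        return [_FORMAT_COMMAND[f] for f in formats]
    except KeyError as err:
        raise ValueError("invalid format %s" % err)

assert _commands_for(['gztar', 'rpm']) == ['bdist_dumb', 'bdist_rpm']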
- commands = [] - for format in self.formats: - try: - commands.append(self.format_command[format][0]) - except KeyError: - raise DistutilsOptionError("invalid format '%s'" % format) - - # Reinitialize and run each command. - for i in range(len(self.formats)): - cmd_name = commands[i] - sub_cmd = self.reinitialize_command(cmd_name) - if cmd_name not in self.no_format_option: - sub_cmd.format = self.formats[i] - - # passing the owner and group names for tar archiving - if cmd_name == 'bdist_dumb': - sub_cmd.owner = self.owner - sub_cmd.group = self.group - - # If we're going to need to run this command again, tell it to - # keep its temporary files around so subsequent runs go faster. - if cmd_name in commands[i+1:]: - sub_cmd.keep_temp = 1 - self.run_command(cmd_name) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_dumb.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_dumb.py deleted file mode 100755 index f0d6b5b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_dumb.py +++ /dev/null @@ -1,123 +0,0 @@ -"""distutils.command.bdist_dumb - -Implements the Distutils 'bdist_dumb' command (create a "dumb" built -distribution -- i.e., just an archive to be unpacked under $prefix or -$exec_prefix).""" - -import os -from distutils.core import Command -from distutils.util import get_platform -from distutils.dir_util import remove_tree, ensure_relative -from distutils.errors import * -from distutils.sysconfig import get_python_version -from distutils import log - -class bdist_dumb(Command): - - description = "create a \"dumb\" built distribution" - - user_options = [('bdist-dir=', 'd', - "temporary directory for creating the distribution"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform()), - ('format=', 'f', - "archive format to create (tar, gztar, bztar, xztar, " - "ztar, zip)"), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ('relative', None, - "build the archive using relative paths " - "(default: false)"), - ('owner=', 'u', - "Owner name used when creating a tar file" - " [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file" - " [default: current group]"), - ] - - boolean_options = ['keep-temp', 'skip-build', 'relative'] - - default_format = { 'posix': 'gztar', - 'nt': 'zip' } - - def initialize_options(self): - self.bdist_dir = None - self.plat_name = None - self.format = None - self.keep_temp = 0 - self.dist_dir = None - self.skip_build = None - self.relative = 0 - self.owner = None - self.group = None - - def finalize_options(self): - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'dumb') - - if self.format is None: - try: - self.format = self.default_format[os.name] - except KeyError: - raise DistutilsPlatformError( - "don't know how to create dumb built distributions " - "on platform %s" % os.name) - - self.set_undefined_options('bdist', - ('dist_dir', 'dist_dir'), - ('plat_name', 'plat_name'), - ('skip_build', 'skip_build')) - - def run(self): - if not self.skip_build: - self.run_command('build') - - install = 
self.reinitialize_command('install', reinit_subcommands=1) - install.root = self.bdist_dir - install.skip_build = self.skip_build - install.warn_dir = 0 - - log.info("installing to %s", self.bdist_dir) - self.run_command('install') - - # And make an archive relative to the root of the - # pseudo-installation tree. - archive_basename = "%s.%s" % (self.distribution.get_fullname(), - self.plat_name) - - pseudoinstall_root = os.path.join(self.dist_dir, archive_basename) - if not self.relative: - archive_root = self.bdist_dir - else: - if (self.distribution.has_ext_modules() and - (install.install_base != install.install_platbase)): - raise DistutilsPlatformError( - "can't make a dumb built distribution where " - "base and platbase are different (%s, %s)" - % (repr(install.install_base), - repr(install.install_platbase))) - else: - archive_root = os.path.join(self.bdist_dir, - ensure_relative(install.install_base)) - - # Make the archive - filename = self.make_archive(pseudoinstall_root, - self.format, root_dir=archive_root, - owner=self.owner, group=self.group) - if self.distribution.has_ext_modules(): - pyversion = get_python_version() - else: - pyversion = 'any' - self.distribution.dist_files.append(('bdist_dumb', pyversion, - filename)) - - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_msi.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_msi.py deleted file mode 100755 index 1525953..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_msi.py +++ /dev/null @@ -1,749 +0,0 @@ -# Copyright (C) 2005, 2006 Martin von Löwis -# Licensed to PSF under a Contributor Agreement. -# The bdist_wininst command proper -# based on bdist_wininst -""" -Implements the bdist_msi command. -""" - -import os -import sys -import warnings -from distutils.core import Command -from distutils.dir_util import remove_tree -from distutils.sysconfig import get_python_version -from distutils.version import StrictVersion -from distutils.errors import DistutilsOptionError -from distutils.util import get_platform -from distutils import log -import msilib -from msilib import schema, sequence, text -from msilib import Directory, Feature, Dialog, add_data - -class PyDialog(Dialog): - """Dialog class with a fixed layout: controls at the top, then a ruler, - then a list of buttons: back, next, cancel. Optionally a bitmap at the - left.""" - def __init__(self, *args, **kw): - """Dialog(database, name, x, y, w, h, attributes, title, first, - default, cancel, bitmap=true)""" - super().__init__(*args) - ruler = self.h - 36 - bmwidth = 152*ruler/328 - #if kw.get("bitmap", True): - # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin") - self.line("BottomLine", 0, ruler, self.w, 0) - - def title(self, title): - "Set the title text of the dialog at the top." - # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix, - # text, in VerdanaBold10 - self.text("Title", 15, 10, 320, 60, 0x30003, - r"{\VerdanaBold10}%s" % title) - - def back(self, title, next, name = "Back", active = 1): - """Add a back button with a given title, the tab-next button, - its name in the Control table, possibly initially disabled. 
- - Return the button, so that events can be associated""" - if active: - flags = 3 # Visible|Enabled - else: - flags = 1 # Visible - return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next) - - def cancel(self, title, next, name = "Cancel", active = 1): - """Add a cancel button with a given title, the tab-next button, - its name in the Control table, possibly initially disabled. - - Return the button, so that events can be associated""" - if active: - flags = 3 # Visible|Enabled - else: - flags = 1 # Visible - return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next) - - def next(self, title, next, name = "Next", active = 1): - """Add a Next button with a given title, the tab-next button, - its name in the Control table, possibly initially disabled. - - Return the button, so that events can be associated""" - if active: - flags = 3 # Visible|Enabled - else: - flags = 1 # Visible - return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next) - - def xbutton(self, name, title, next, xpos): - """Add a button with a given title, the tab-next button, - its name in the Control table, giving its x position; the - y-position is aligned with the other buttons. - - Return the button, so that events can be associated""" - return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next) - -class bdist_msi(Command): - - description = "create a Microsoft Installer (.msi) binary distribution" - - user_options = [('bdist-dir=', None, - "temporary directory for creating the distribution"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform()), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('target-version=', None, - "require a specific python version" + - " on the target system"), - ('no-target-compile', 'c', - "do not compile .py to .pyc on the target system"), - ('no-target-optimize', 'o', - "do not compile .py to .pyo (optimized) " - "on the target system"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ('install-script=', None, - "basename of installation script to be run after " - "installation or before deinstallation"), - ('pre-install-script=', None, - "Fully qualified filename of a script to be run before " - "any files are installed. 
This script need not be in the " - "distribution"), - ] - - boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize', - 'skip-build'] - - all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4', - '2.5', '2.6', '2.7', '2.8', '2.9', - '3.0', '3.1', '3.2', '3.3', '3.4', - '3.5', '3.6', '3.7', '3.8', '3.9'] - other_version = 'X' - - def __init__(self, *args, **kw): - super().__init__(*args, **kw) - warnings.warn("bdist_msi command is deprecated since Python 3.9, " - "use bdist_wheel (wheel packages) instead", - DeprecationWarning, 2) - - def initialize_options(self): - self.bdist_dir = None - self.plat_name = None - self.keep_temp = 0 - self.no_target_compile = 0 - self.no_target_optimize = 0 - self.target_version = None - self.dist_dir = None - self.skip_build = None - self.install_script = None - self.pre_install_script = None - self.versions = None - - def finalize_options(self): - self.set_undefined_options('bdist', ('skip_build', 'skip_build')) - - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'msi') - - short_version = get_python_version() - if (not self.target_version) and self.distribution.has_ext_modules(): - self.target_version = short_version - - if self.target_version: - self.versions = [self.target_version] - if not self.skip_build and self.distribution.has_ext_modules()\ - and self.target_version != short_version: - raise DistutilsOptionError( - "target version can only be %s, or the '--skip-build'" - " option must be specified" % (short_version,)) - else: - self.versions = list(self.all_versions) - - self.set_undefined_options('bdist', - ('dist_dir', 'dist_dir'), - ('plat_name', 'plat_name'), - ) - - if self.pre_install_script: - raise DistutilsOptionError( - "the pre-install-script feature is not yet implemented") - - if self.install_script: - for script in self.distribution.scripts: - if self.install_script == os.path.basename(script): - break - else: - raise DistutilsOptionError( - "install_script '%s' not found in scripts" - % self.install_script) - self.install_script_key = None - - def run(self): - if not self.skip_build: - self.run_command('build') - - install = self.reinitialize_command('install', reinit_subcommands=1) - install.prefix = self.bdist_dir - install.skip_build = self.skip_build - install.warn_dir = 0 - - install_lib = self.reinitialize_command('install_lib') - # we do not want to include pyc or pyo files - install_lib.compile = 0 - install_lib.optimize = 0 - - if self.distribution.has_ext_modules(): - # If we are building an installer for a Python version other - # than the one we are currently running, then we need to ensure - # our build_lib reflects the other Python version rather than ours. - # Note that for target_version!=sys.version, we must have skipped the - # build step, so there is no issue with enforcing the build of this - # version. 
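# A sketch of the directory convention being repointed below: distutils
# names its build tree 'build/lib.<plat>-<X.Y>', so building an installer
# for a different target Python means recomputing that suffix
# (helper name invented for illustration):
import os, sys

def _build_lib_for(build_base, plat_name, target_version=None):
    version = target_version or '%d.%d' % sys.version_info[:2]
    return os.path.join(build_base, 'lib.%s-%s' % (plat_name, version))

# e.g. 'build/lib.win-amd64-3.8' when targeting Python 3.8 on 64-bit Windows
assert _build_lib_for('build', 'win-amd64', '3.8').endswith('lib.win-amd64-3.8')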
- target_version = self.target_version - if not target_version: - assert self.skip_build, "Should have already checked this" - target_version = '%d.%d' % sys.version_info[:2] - plat_specifier = ".%s-%s" % (self.plat_name, target_version) - build = self.get_finalized_command('build') - build.build_lib = os.path.join(build.build_base, - 'lib' + plat_specifier) - - log.info("installing to %s", self.bdist_dir) - install.ensure_finalized() - - # avoid warning of 'install_lib' about installing - # into a directory not in sys.path - sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB')) - - install.run() - - del sys.path[0] - - self.mkpath(self.dist_dir) - fullname = self.distribution.get_fullname() - installer_name = self.get_installer_filename(fullname) - installer_name = os.path.abspath(installer_name) - if os.path.exists(installer_name): os.unlink(installer_name) - - metadata = self.distribution.metadata - author = metadata.author - if not author: - author = metadata.maintainer - if not author: - author = "UNKNOWN" - version = metadata.get_version() - # ProductVersion must be strictly numeric - # XXX need to deal with prerelease versions - sversion = "%d.%d.%d" % StrictVersion(version).version - # Prefix ProductName with Python x.y, so that - # it sorts together with the other Python packages - # in Add-Remove-Programs (APR) - fullname = self.distribution.get_fullname() - if self.target_version: - product_name = "Python %s %s" % (self.target_version, fullname) - else: - product_name = "Python %s" % (fullname) - self.db = msilib.init_database(installer_name, schema, - product_name, msilib.gen_uuid(), - sversion, author) - msilib.add_tables(self.db, sequence) - props = [('DistVersion', version)] - email = metadata.author_email or metadata.maintainer_email - if email: - props.append(("ARPCONTACT", email)) - if metadata.url: - props.append(("ARPURLINFOABOUT", metadata.url)) - if props: - add_data(self.db, 'Property', props) - - self.add_find_python() - self.add_files() - self.add_scripts() - self.add_ui() - self.db.Commit() - - if hasattr(self.distribution, 'dist_files'): - tup = 'bdist_msi', self.target_version or 'any', fullname - self.distribution.dist_files.append(tup) - - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) - - def add_files(self): - db = self.db - cab = msilib.CAB("distfiles") - rootdir = os.path.abspath(self.bdist_dir) - - root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir") - f = Feature(db, "Python", "Python", "Everything", - 0, 1, directory="TARGETDIR") - - items = [(f, root, '')] - for version in self.versions + [self.other_version]: - target = "TARGETDIR" + version - name = default = "Python" + version - desc = "Everything" - if version is self.other_version: - title = "Python from another location" - level = 2 - else: - title = "Python %s from registry" % version - level = 1 - f = Feature(db, name, title, desc, 1, level, directory=target) - dir = Directory(db, cab, root, rootdir, target, default) - items.append((f, dir, version)) - db.Commit() - - seen = {} - for feature, dir, version in items: - todo = [dir] - while todo: - dir = todo.pop() - for file in os.listdir(dir.absolute): - afile = os.path.join(dir.absolute, file) - if os.path.isdir(afile): - short = "%s|%s" % (dir.make_short(file), file) - default = file + version - newdir = Directory(db, cab, dir, file, default, short) - todo.append(newdir) - else: - if not dir.component: - dir.start_component(dir.logical, feature, 0) - if afile not in seen: - key = seen[afile] = 
dir.add_file(file) - if file==self.install_script: - if self.install_script_key: - raise DistutilsOptionError( - "Multiple files with name %s" % file) - self.install_script_key = '[#%s]' % key - else: - key = seen[afile] - add_data(self.db, "DuplicateFile", - [(key + version, dir.component, key, None, dir.logical)]) - db.Commit() - cab.commit(db) - - def add_find_python(self): - """Adds code to the installer to compute the location of Python. - - Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the - registry for each version of Python. - - Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, - else from PYTHON.MACHINE.X.Y. - - Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe""" - - start = 402 - for ver in self.versions: - install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver - machine_reg = "python.machine." + ver - user_reg = "python.user." + ver - machine_prop = "PYTHON.MACHINE." + ver - user_prop = "PYTHON.USER." + ver - machine_action = "PythonFromMachine" + ver - user_action = "PythonFromUser" + ver - exe_action = "PythonExe" + ver - target_dir_prop = "TARGETDIR" + ver - exe_prop = "PYTHON" + ver - if msilib.Win64: - # type: msidbLocatorTypeRawValue + msidbLocatorType64bit - Type = 2+16 - else: - Type = 2 - add_data(self.db, "RegLocator", - [(machine_reg, 2, install_path, None, Type), - (user_reg, 1, install_path, None, Type)]) - add_data(self.db, "AppSearch", - [(machine_prop, machine_reg), - (user_prop, user_reg)]) - add_data(self.db, "CustomAction", - [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"), - (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"), - (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"), - ]) - add_data(self.db, "InstallExecuteSequence", - [(machine_action, machine_prop, start), - (user_action, user_prop, start + 1), - (exe_action, None, start + 2), - ]) - add_data(self.db, "InstallUISequence", - [(machine_action, machine_prop, start), - (user_action, user_prop, start + 1), - (exe_action, None, start + 2), - ]) - add_data(self.db, "Condition", - [("Python" + ver, 0, "NOT TARGETDIR" + ver)]) - start += 4 - assert start < 500 - - def add_scripts(self): - if self.install_script: - start = 6800 - for ver in self.versions + [self.other_version]: - install_action = "install_script." + ver - exe_prop = "PYTHON" + ver - add_data(self.db, "CustomAction", - [(install_action, 50, exe_prop, self.install_script_key)]) - add_data(self.db, "InstallExecuteSequence", - [(install_action, "&Python%s=3" % ver, start)]) - start += 1 - # XXX pre-install scripts are currently refused in finalize_options() - # but if this feature is completed, it will also need to add - # entries for each version as the above code does - if self.pre_install_script: - scriptfn = os.path.join(self.bdist_dir, "preinstall.bat") - with open(scriptfn, "w") as f: - # The batch file will be executed with [PYTHON], so that %1 - # is the path to the Python interpreter; %0 will be the path - # of the batch file. 
- # rem =""" - # %1 %0 - # exit - # """ - # - f.write('rem ="""\n%1 %0\nexit\n"""\n') - with open(self.pre_install_script) as fin: - f.write(fin.read()) - add_data(self.db, "Binary", - [("PreInstall", msilib.Binary(scriptfn)) - ]) - add_data(self.db, "CustomAction", - [("PreInstall", 2, "PreInstall", None) - ]) - add_data(self.db, "InstallExecuteSequence", - [("PreInstall", "NOT Installed", 450)]) - - - def add_ui(self): - db = self.db - x = y = 50 - w = 370 - h = 300 - title = "[ProductName] Setup" - - # see "Dialog Style Bits" - modal = 3 # visible | modal - modeless = 1 # visible - track_disk_space = 32 - - # UI customization properties - add_data(db, "Property", - # See "DefaultUIFont Property" - [("DefaultUIFont", "DlgFont8"), - # See "ErrorDialog Style Bit" - ("ErrorDialog", "ErrorDlg"), - ("Progress1", "Install"), # modified in maintenance type dlg - ("Progress2", "installs"), - ("MaintenanceForm_Action", "Repair"), - # possible values: ALL, JUSTME - ("WhichUsers", "ALL") - ]) - - # Fonts, see "TextStyle Table" - add_data(db, "TextStyle", - [("DlgFont8", "Tahoma", 9, None, 0), - ("DlgFontBold8", "Tahoma", 8, None, 1), #bold - ("VerdanaBold10", "Verdana", 10, None, 1), - ("VerdanaRed9", "Verdana", 9, 255, 0), - ]) - - # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table" - # Numbers indicate sequence; see sequence.py for how these action integrate - add_data(db, "InstallUISequence", - [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140), - ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141), - # In the user interface, assume all-users installation if privileged. - ("SelectFeaturesDlg", "Not Installed", 1230), - # XXX no support for resume installations yet - #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240), - ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250), - ("ProgressDlg", None, 1280)]) - - add_data(db, 'ActionText', text.ActionText) - add_data(db, 'UIText', text.UIText) - ##################################################################### - # Standard dialogs: FatalError, UserExit, ExitDialog - fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title, - "Finish", "Finish", "Finish") - fatal.title("[ProductName] Installer ended prematurely") - fatal.back("< Back", "Finish", active = 0) - fatal.cancel("Cancel", "Back", active = 0) - fatal.text("Description1", 15, 70, 320, 80, 0x30003, - "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.") - fatal.text("Description2", 15, 155, 320, 20, 0x30003, - "Click the Finish button to exit the Installer.") - c=fatal.next("Finish", "Cancel", name="Finish") - c.event("EndDialog", "Exit") - - user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title, - "Finish", "Finish", "Finish") - user_exit.title("[ProductName] Installer was interrupted") - user_exit.back("< Back", "Finish", active = 0) - user_exit.cancel("Cancel", "Back", active = 0) - user_exit.text("Description1", 15, 70, 320, 80, 0x30003, - "[ProductName] setup was interrupted. Your system has not been modified. 
" - "To install this program at a later time, please run the installation again.") - user_exit.text("Description2", 15, 155, 320, 20, 0x30003, - "Click the Finish button to exit the Installer.") - c = user_exit.next("Finish", "Cancel", name="Finish") - c.event("EndDialog", "Exit") - - exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title, - "Finish", "Finish", "Finish") - exit_dialog.title("Completing the [ProductName] Installer") - exit_dialog.back("< Back", "Finish", active = 0) - exit_dialog.cancel("Cancel", "Back", active = 0) - exit_dialog.text("Description", 15, 235, 320, 20, 0x30003, - "Click the Finish button to exit the Installer.") - c = exit_dialog.next("Finish", "Cancel", name="Finish") - c.event("EndDialog", "Return") - - ##################################################################### - # Required dialog: FilesInUse, ErrorDlg - inuse = PyDialog(db, "FilesInUse", - x, y, w, h, - 19, # KeepModeless|Modal|Visible - title, - "Retry", "Retry", "Retry", bitmap=False) - inuse.text("Title", 15, 6, 200, 15, 0x30003, - r"{\DlgFontBold8}Files in Use") - inuse.text("Description", 20, 23, 280, 20, 0x30003, - "Some files that need to be updated are currently in use.") - inuse.text("Text", 20, 55, 330, 50, 3, - "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.") - inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess", - None, None, None) - c=inuse.back("Exit", "Ignore", name="Exit") - c.event("EndDialog", "Exit") - c=inuse.next("Ignore", "Retry", name="Ignore") - c.event("EndDialog", "Ignore") - c=inuse.cancel("Retry", "Exit", name="Retry") - c.event("EndDialog","Retry") - - # See "Error Dialog". See "ICE20" for the required names of the controls. 
- error = Dialog(db, "ErrorDlg", - 50, 10, 330, 101, - 65543, # Error|Minimize|Modal|Visible - title, - "ErrorText", None, None) - error.text("ErrorText", 50,9,280,48,3, "") - #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None) - error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo") - error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes") - error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort") - error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel") - error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore") - error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk") - error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry") - - ##################################################################### - # Global "Query Cancel" dialog - cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title, - "No", "No", "No") - cancel.text("Text", 48, 15, 194, 30, 3, - "Are you sure you want to cancel [ProductName] installation?") - #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None, - # "py.ico", None, None) - c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No") - c.event("EndDialog", "Exit") - - c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes") - c.event("EndDialog", "Return") - - ##################################################################### - # Global "Wait for costing" dialog - costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title, - "Return", "Return", "Return") - costing.text("Text", 48, 15, 194, 30, 3, - "Please wait while the installer finishes determining your disk space requirements.") - c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None) - c.event("EndDialog", "Exit") - - ##################################################################### - # Preparation dialog: no user input except cancellation - prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title, - "Cancel", "Cancel", "Cancel") - prep.text("Description", 15, 70, 320, 40, 0x30003, - "Please wait while the Installer prepares to guide you through the installation.") - prep.title("Welcome to the [ProductName] Installer") - c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...") - c.mapping("ActionText", "Text") - c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None) - c.mapping("ActionData", "Text") - prep.back("Back", None, active=0) - prep.next("Next", None, active=0) - c=prep.cancel("Cancel", None) - c.event("SpawnDialog", "CancelDlg") - - ##################################################################### - # Feature (Python directory) selection - seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title, - "Next", "Next", "Cancel") - seldlg.title("Select Python Installations") - - seldlg.text("Hint", 15, 30, 300, 20, 3, - "Select the Python locations where %s should be installed." 
- % self.distribution.get_fullname()) - - seldlg.back("< Back", None, active=0) - c = seldlg.next("Next >", "Cancel") - order = 1 - c.event("[TARGETDIR]", "[SourceDir]", ordering=order) - for version in self.versions + [self.other_version]: - order += 1 - c.event("[TARGETDIR]", "[TARGETDIR%s]" % version, - "FEATURE_SELECTED AND &Python%s=3" % version, - ordering=order) - c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1) - c.event("EndDialog", "Return", ordering=order + 2) - c = seldlg.cancel("Cancel", "Features") - c.event("SpawnDialog", "CancelDlg") - - c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3, - "FEATURE", None, "PathEdit", None) - c.event("[FEATURE_SELECTED]", "1") - ver = self.other_version - install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver - dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver - - c = seldlg.text("Other", 15, 200, 300, 15, 3, - "Provide an alternate Python location") - c.condition("Enable", install_other_cond) - c.condition("Show", install_other_cond) - c.condition("Disable", dont_install_other_cond) - c.condition("Hide", dont_install_other_cond) - - c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1, - "TARGETDIR" + ver, None, "Next", None) - c.condition("Enable", install_other_cond) - c.condition("Show", install_other_cond) - c.condition("Disable", dont_install_other_cond) - c.condition("Hide", dont_install_other_cond) - - ##################################################################### - # Disk cost - cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title, - "OK", "OK", "OK", bitmap=False) - cost.text("Title", 15, 6, 200, 15, 0x30003, - r"{\DlgFontBold8}Disk Space Requirements") - cost.text("Description", 20, 20, 280, 20, 0x30003, - "The disk space required for the installation of the selected features.") - cost.text("Text", 20, 53, 330, 60, 3, - "The highlighted volumes (if any) do not have enough disk space " - "available for the currently selected features. You can either " - "remove some files from the highlighted volumes, or choose to " - "install less features onto local drive(s), or select different " - "destination drive(s).") - cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223, - None, "{120}{70}{70}{70}{70}", None, None) - cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return") - - ##################################################################### - # WhichUsers Dialog. Only available on NT, and for privileged users. - # This must be run before FindRelatedProducts, because that will - # take into account whether the previous installation was per-user - # or per-machine. We currently don't support going back to this - # dialog after "Next" was selected; to support this, we would need to - # find how to reset the ALLUSERS property, and how to re-run - # FindRelatedProducts. - # On Windows9x, the ALLUSERS property is ignored on the command line - # and in the Property table, but installer fails according to the documentation - # if a dialog attempts to set ALLUSERS. 
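# A sketch of the switch the WhichUsers dialog implements below: the radio
# group stores "ALL"/"JUSTME" in the WhichUsers property, and the Next
# button's event sets ALLUSERS=1 only under the condition WhichUsers="ALL"
# (function and names invented for illustration):
def _resolve_allusers(which_users):
    if which_users not in ('ALL', 'JUSTME'):
        raise ValueError(which_users)
    return {'ALLUSERS': '1'} if which_users == 'ALL' else {}

assert _resolve_allusers('ALL') == {'ALLUSERS': '1'}   # per-machine install
assert _resolve_allusers('JUSTME') == {}               # per-user install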
- whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title, - "AdminInstall", "Next", "Cancel") - whichusers.title("Select whether to install [ProductName] for all users of this computer.") - # A radio group with two options: allusers, justme - g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3, - "WhichUsers", "", "Next") - g.add("ALL", 0, 5, 150, 20, "Install for all users") - g.add("JUSTME", 0, 25, 150, 20, "Install just for me") - - whichusers.back("Back", None, active=0) - - c = whichusers.next("Next >", "Cancel") - c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1) - c.event("EndDialog", "Return", ordering = 2) - - c = whichusers.cancel("Cancel", "AdminInstall") - c.event("SpawnDialog", "CancelDlg") - - ##################################################################### - # Installation Progress dialog (modeless) - progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title, - "Cancel", "Cancel", "Cancel", bitmap=False) - progress.text("Title", 20, 15, 200, 15, 0x30003, - r"{\DlgFontBold8}[Progress1] [ProductName]") - progress.text("Text", 35, 65, 300, 30, 3, - "Please wait while the Installer [Progress2] [ProductName]. " - "This may take several minutes.") - progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:") - - c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...") - c.mapping("ActionText", "Text") - - #c=progress.text("ActionData", 35, 140, 300, 20, 3, None) - #c.mapping("ActionData", "Text") - - c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537, - None, "Progress done", None, None) - c.mapping("SetProgress", "Progress") - - progress.back("< Back", "Next", active=False) - progress.next("Next >", "Cancel", active=False) - progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg") - - ################################################################### - # Maintenance type: repair/uninstall - maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title, - "Next", "Next", "Cancel") - maint.title("Welcome to the [ProductName] Setup Wizard") - maint.text("BodyText", 15, 63, 330, 42, 3, - "Select whether you want to repair or remove [ProductName].") - g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3, - "MaintenanceForm_Action", "", "Next") - #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]") - g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]") - g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]") - - maint.back("< Back", None, active=False) - c=maint.next("Finish", "Cancel") - # Change installation: Change progress dialog to "Change", then ask - # for feature selection - #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1) - #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2) - - # Reinstall: Change progress dialog to "Repair", then invoke reinstall - # Also set list of reinstalled features to "ALL" - c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5) - c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6) - c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7) - c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8) - - # Uninstall: Change progress to "Remove", then invoke uninstall - # Also set list of removed features to "ALL" - c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11) - c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12) - c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13) - c.event("Remove", 
"ALL", 'MaintenanceForm_Action="Remove"', 14) - - # Close dialog when maintenance action scheduled - c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20) - #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21) - - maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg") - - def get_installer_filename(self, fullname): - # Factored out to allow overriding in subclasses - if self.target_version: - base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name, - self.target_version) - else: - base_name = "%s.%s.msi" % (fullname, self.plat_name) - installer_name = os.path.join(self.dist_dir, base_name) - return installer_name diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_rpm.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_rpm.py deleted file mode 100755 index 550cbfa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_rpm.py +++ /dev/null @@ -1,579 +0,0 @@ -"""distutils.command.bdist_rpm - -Implements the Distutils 'bdist_rpm' command (create RPM source and binary -distributions).""" - -import subprocess, sys, os -from distutils.core import Command -from distutils.debug import DEBUG -from distutils.file_util import write_file -from distutils.errors import * -from distutils.sysconfig import get_python_version -from distutils import log - -class bdist_rpm(Command): - - description = "create an RPM distribution" - - user_options = [ - ('bdist-base=', None, - "base directory for creating built distributions"), - ('rpm-base=', None, - "base directory for creating RPMs (defaults to \"rpm\" under " - "--bdist-base; must be specified for RPM 2)"), - ('dist-dir=', 'd', - "directory to put final RPM files in " - "(and .spec files if --spec-only)"), - ('python=', None, - "path to Python interpreter to hard-code in the .spec file " - "(default: \"python\")"), - ('fix-python', None, - "hard-code the exact path to the current Python interpreter in " - "the .spec file"), - ('spec-only', None, - "only regenerate spec file"), - ('source-only', None, - "only generate source RPM"), - ('binary-only', None, - "only generate binary RPM"), - ('use-bzip2', None, - "use bzip2 instead of gzip to create source distribution"), - - # More meta-data: too RPM-specific to put in the setup script, - # but needs to go in the .spec file -- so we make these options - # to "bdist_rpm". The idea is that packagers would put this - # info in setup.cfg, although they are of course free to - # supply it on the command line. - ('distribution-name=', None, - "name of the (Linux) distribution to which this " - "RPM applies (*not* the name of the module distribution!)"), - ('group=', None, - "package classification [default: \"Development/Libraries\"]"), - ('release=', None, - "RPM release number"), - ('serial=', None, - "RPM serial number"), - ('vendor=', None, - "RPM \"vendor\" (eg. \"Joe Blow \") " - "[default: maintainer or author from setup script]"), - ('packager=', None, - "RPM packager (eg. 
\"Jane Doe \") " - "[default: vendor]"), - ('doc-files=', None, - "list of documentation files (space or comma-separated)"), - ('changelog=', None, - "RPM changelog"), - ('icon=', None, - "name of icon file"), - ('provides=', None, - "capabilities provided by this package"), - ('requires=', None, - "capabilities required by this package"), - ('conflicts=', None, - "capabilities which conflict with this package"), - ('build-requires=', None, - "capabilities required to build this package"), - ('obsoletes=', None, - "capabilities made obsolete by this package"), - ('no-autoreq', None, - "do not automatically calculate dependencies"), - - # Actions to take when building RPM - ('keep-temp', 'k', - "don't clean up RPM build directory"), - ('no-keep-temp', None, - "clean up RPM build directory [default]"), - ('use-rpm-opt-flags', None, - "compile with RPM_OPT_FLAGS when building from source RPM"), - ('no-rpm-opt-flags', None, - "do not pass any RPM CFLAGS to compiler"), - ('rpm3-mode', None, - "RPM 3 compatibility mode (default)"), - ('rpm2-mode', None, - "RPM 2 compatibility mode"), - - # Add the hooks necessary for specifying custom scripts - ('prep-script=', None, - "Specify a script for the PREP phase of RPM building"), - ('build-script=', None, - "Specify a script for the BUILD phase of RPM building"), - - ('pre-install=', None, - "Specify a script for the pre-INSTALL phase of RPM building"), - ('install-script=', None, - "Specify a script for the INSTALL phase of RPM building"), - ('post-install=', None, - "Specify a script for the post-INSTALL phase of RPM building"), - - ('pre-uninstall=', None, - "Specify a script for the pre-UNINSTALL phase of RPM building"), - ('post-uninstall=', None, - "Specify a script for the post-UNINSTALL phase of RPM building"), - - ('clean-script=', None, - "Specify a script for the CLEAN phase of RPM building"), - - ('verify-script=', None, - "Specify a script for the VERIFY phase of the RPM build"), - - # Allow a packager to explicitly force an architecture - ('force-arch=', None, - "Force an architecture onto the RPM build process"), - - ('quiet', 'q', - "Run the INSTALL phase of RPM building in quiet mode"), - ] - - boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode', - 'no-autoreq', 'quiet'] - - negative_opt = {'no-keep-temp': 'keep-temp', - 'no-rpm-opt-flags': 'use-rpm-opt-flags', - 'rpm2-mode': 'rpm3-mode'} - - - def initialize_options(self): - self.bdist_base = None - self.rpm_base = None - self.dist_dir = None - self.python = None - self.fix_python = None - self.spec_only = None - self.binary_only = None - self.source_only = None - self.use_bzip2 = None - - self.distribution_name = None - self.group = None - self.release = None - self.serial = None - self.vendor = None - self.packager = None - self.doc_files = None - self.changelog = None - self.icon = None - - self.prep_script = None - self.build_script = None - self.install_script = None - self.clean_script = None - self.verify_script = None - self.pre_install = None - self.post_install = None - self.pre_uninstall = None - self.post_uninstall = None - self.prep = None - self.provides = None - self.requires = None - self.conflicts = None - self.build_requires = None - self.obsoletes = None - - self.keep_temp = 0 - self.use_rpm_opt_flags = 1 - self.rpm3_mode = 1 - self.no_autoreq = 0 - - self.force_arch = None - self.quiet = 0 - - def finalize_options(self): - self.set_undefined_options('bdist', ('bdist_base', 'bdist_base')) - if self.rpm_base is None: - if not self.rpm3_mode: - raise 
DistutilsOptionError( - "you must specify --rpm-base in RPM 2 mode") - self.rpm_base = os.path.join(self.bdist_base, "rpm") - - if self.python is None: - if self.fix_python: - self.python = sys.executable - else: - self.python = "python3" - elif self.fix_python: - raise DistutilsOptionError( - "--python and --fix-python are mutually exclusive options") - - if os.name != 'posix': - raise DistutilsPlatformError("don't know how to create RPM " - "distributions on platform %s" % os.name) - if self.binary_only and self.source_only: - raise DistutilsOptionError( - "cannot supply both '--source-only' and '--binary-only'") - - # don't pass CFLAGS to pure python distributions - if not self.distribution.has_ext_modules(): - self.use_rpm_opt_flags = 0 - - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - self.finalize_package_data() - - def finalize_package_data(self): - self.ensure_string('group', "Development/Libraries") - self.ensure_string('vendor', - "%s <%s>" % (self.distribution.get_contact(), - self.distribution.get_contact_email())) - self.ensure_string('packager') - self.ensure_string_list('doc_files') - if isinstance(self.doc_files, list): - for readme in ('README', 'README.txt'): - if os.path.exists(readme) and readme not in self.doc_files: - self.doc_files.append(readme) - - self.ensure_string('release', "1") - self.ensure_string('serial') # should it be an int? - - self.ensure_string('distribution_name') - - self.ensure_string('changelog') - # Format changelog correctly - self.changelog = self._format_changelog(self.changelog) - - self.ensure_filename('icon') - - self.ensure_filename('prep_script') - self.ensure_filename('build_script') - self.ensure_filename('install_script') - self.ensure_filename('clean_script') - self.ensure_filename('verify_script') - self.ensure_filename('pre_install') - self.ensure_filename('post_install') - self.ensure_filename('pre_uninstall') - self.ensure_filename('post_uninstall') - - # XXX don't forget we punted on summaries and descriptions -- they - # should be handled here eventually! - - # Now *this* is some meta-data that belongs in the setup script... - self.ensure_string_list('provides') - self.ensure_string_list('requires') - self.ensure_string_list('conflicts') - self.ensure_string_list('build_requires') - self.ensure_string_list('obsoletes') - - self.ensure_string('force_arch') - - def run(self): - if DEBUG: - print("before _get_package_data():") - print("vendor =", self.vendor) - print("packager =", self.packager) - print("doc_files =", self.doc_files) - print("changelog =", self.changelog) - - # make directories - if self.spec_only: - spec_dir = self.dist_dir - self.mkpath(spec_dir) - else: - rpm_dir = {} - for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'): - rpm_dir[d] = os.path.join(self.rpm_base, d) - self.mkpath(rpm_dir[d]) - spec_dir = rpm_dir['SPECS'] - - # Spec file goes into 'dist_dir' if '--spec-only specified', - # build/rpm. otherwise. - spec_path = os.path.join(spec_dir, - "%s.spec" % self.distribution.get_name()) - self.execute(write_file, - (spec_path, - self._make_spec_file()), - "writing '%s'" % spec_path) - - if self.spec_only: # stop if requested - return - - # Make a source distribution and copy to SOURCES directory with - # optional icon. 
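# The rpmbuild invocation assembled later in this method reduces to the
# following sketch (invented helper; it mirrors the -bs/-bb/-ba selection
# and the --define/--clean/--quiet flags handled there):
def _rpm_command(spec_path, source_only=False, binary_only=False,
                 python='python3', topdir=None, keep_temp=False, quiet=False):
    cmd = ['rpmbuild']
    cmd.append('-bs' if source_only else '-bb' if binary_only else '-ba')
    cmd.extend(['--define', '__python %s' % python])
    if topdir:                       # rpm3-mode: redirect the build tree
        cmd.extend(['--define', '_topdir %s' % topdir])
    if not keep_temp:
        cmd.append('--clean')
    if quiet:
        cmd.append('--quiet')
    cmd.append(spec_path)
    return cmd

assert _rpm_command('dist/foo.spec', source_only=True)[:2] == ['rpmbuild', '-bs']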
- saved_dist_files = self.distribution.dist_files[:] - sdist = self.reinitialize_command('sdist') - if self.use_bzip2: - sdist.formats = ['bztar'] - else: - sdist.formats = ['gztar'] - self.run_command('sdist') - self.distribution.dist_files = saved_dist_files - - source = sdist.get_archive_files()[0] - source_dir = rpm_dir['SOURCES'] - self.copy_file(source, source_dir) - - if self.icon: - if os.path.exists(self.icon): - self.copy_file(self.icon, source_dir) - else: - raise DistutilsFileError( - "icon file '%s' does not exist" % self.icon) - - # build package - log.info("building RPMs") - rpm_cmd = ['rpmbuild'] - - if self.source_only: # what kind of RPMs? - rpm_cmd.append('-bs') - elif self.binary_only: - rpm_cmd.append('-bb') - else: - rpm_cmd.append('-ba') - rpm_cmd.extend(['--define', '__python %s' % self.python]) - if self.rpm3_mode: - rpm_cmd.extend(['--define', - '_topdir %s' % os.path.abspath(self.rpm_base)]) - if not self.keep_temp: - rpm_cmd.append('--clean') - - if self.quiet: - rpm_cmd.append('--quiet') - - rpm_cmd.append(spec_path) - # Determine the binary rpm names that should be built out of this spec - # file - # Note that some of these may not be really built (if the file - # list is empty) - nvr_string = "%{name}-%{version}-%{release}" - src_rpm = nvr_string + ".src.rpm" - non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm" - q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % ( - src_rpm, non_src_rpm, spec_path) - - out = os.popen(q_cmd) - try: - binary_rpms = [] - source_rpm = None - while True: - line = out.readline() - if not line: - break - l = line.strip().split() - assert(len(l) == 2) - binary_rpms.append(l[1]) - # The source rpm is named after the first entry in the spec file - if source_rpm is None: - source_rpm = l[0] - - status = out.close() - if status: - raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd)) - - finally: - out.close() - - self.spawn(rpm_cmd) - - if not self.dry_run: - if self.distribution.has_ext_modules(): - pyversion = get_python_version() - else: - pyversion = 'any' - - if not self.binary_only: - srpm = os.path.join(rpm_dir['SRPMS'], source_rpm) - assert(os.path.exists(srpm)) - self.move_file(srpm, self.dist_dir) - filename = os.path.join(self.dist_dir, source_rpm) - self.distribution.dist_files.append( - ('bdist_rpm', pyversion, filename)) - - if not self.source_only: - for rpm in binary_rpms: - rpm = os.path.join(rpm_dir['RPMS'], rpm) - if os.path.exists(rpm): - self.move_file(rpm, self.dist_dir) - filename = os.path.join(self.dist_dir, - os.path.basename(rpm)) - self.distribution.dist_files.append( - ('bdist_rpm', pyversion, filename)) - - def _dist_path(self, path): - return os.path.join(self.dist_dir, os.path.basename(path)) - - def _make_spec_file(self): - """Generate the text of an RPM spec file and return it as a - list of strings (one per line). 
- """ - # definitions and headers - spec_file = [ - '%define name ' + self.distribution.get_name(), - '%define version ' + self.distribution.get_version().replace('-','_'), - '%define unmangled_version ' + self.distribution.get_version(), - '%define release ' + self.release.replace('-','_'), - '', - 'Summary: ' + self.distribution.get_description(), - ] - - # Workaround for #14443 which affects some RPM based systems such as - # RHEL6 (and probably derivatives) - vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}') - # Generate a potential replacement value for __os_install_post (whilst - # normalizing the whitespace to simplify the test for whether the - # invocation of brp-python-bytecompile passes in __python): - vendor_hook = '\n'.join([' %s \\' % line.strip() - for line in vendor_hook.splitlines()]) - problem = "brp-python-bytecompile \\\n" - fixed = "brp-python-bytecompile %{__python} \\\n" - fixed_hook = vendor_hook.replace(problem, fixed) - if fixed_hook != vendor_hook: - spec_file.append('# Workaround for http://bugs.python.org/issue14443') - spec_file.append('%define __os_install_post ' + fixed_hook + '\n') - - # put locale summaries into spec file - # XXX not supported for now (hard to put a dictionary - # in a config file -- arg!) - #for locale in self.summaries.keys(): - # spec_file.append('Summary(%s): %s' % (locale, - # self.summaries[locale])) - - spec_file.extend([ - 'Name: %{name}', - 'Version: %{version}', - 'Release: %{release}',]) - - # XXX yuck! this filename is available from the "sdist" command, - # but only after it has run: and we create the spec file before - # running "sdist", in case of --spec-only. - if self.use_bzip2: - spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2') - else: - spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz') - - spec_file.extend([ - 'License: ' + self.distribution.get_license(), - 'Group: ' + self.group, - 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot', - 'Prefix: %{_prefix}', ]) - - if not self.force_arch: - # noarch if no extension modules - if not self.distribution.has_ext_modules(): - spec_file.append('BuildArch: noarch') - else: - spec_file.append( 'BuildArch: %s' % self.force_arch ) - - for field in ('Vendor', - 'Packager', - 'Provides', - 'Requires', - 'Conflicts', - 'Obsoletes', - ): - val = getattr(self, field.lower()) - if isinstance(val, list): - spec_file.append('%s: %s' % (field, ' '.join(val))) - elif val is not None: - spec_file.append('%s: %s' % (field, val)) - - - if self.distribution.get_url() != 'UNKNOWN': - spec_file.append('Url: ' + self.distribution.get_url()) - - if self.distribution_name: - spec_file.append('Distribution: ' + self.distribution_name) - - if self.build_requires: - spec_file.append('BuildRequires: ' + - ' '.join(self.build_requires)) - - if self.icon: - spec_file.append('Icon: ' + os.path.basename(self.icon)) - - if self.no_autoreq: - spec_file.append('AutoReq: 0') - - spec_file.extend([ - '', - '%description', - self.distribution.get_long_description() - ]) - - # put locale descriptions into spec file - # XXX again, suppressed because config file syntax doesn't - # easily support this ;-( - #for locale in self.descriptions.keys(): - # spec_file.extend([ - # '', - # '%description -l ' + locale, - # self.descriptions[locale], - # ]) - - # rpm scripts - # figure out default build script - def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0])) - def_build = "%s build" % def_setup_call - if self.use_rpm_opt_flags: - def_build 
= 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build - - # insert contents of files - - # XXX this is kind of misleading: user-supplied options are files - # that we open and interpolate into the spec file, but the defaults - # are just text that we drop in as-is. Hmmm. - - install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT ' - '--record=INSTALLED_FILES') % def_setup_call - - script_options = [ - ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"), - ('build', 'build_script', def_build), - ('install', 'install_script', install_cmd), - ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"), - ('verifyscript', 'verify_script', None), - ('pre', 'pre_install', None), - ('post', 'post_install', None), - ('preun', 'pre_uninstall', None), - ('postun', 'post_uninstall', None), - ] - - for (rpm_opt, attr, default) in script_options: - # Insert contents of file referred to, if no file is referred to - # use 'default' as contents of script - val = getattr(self, attr) - if val or default: - spec_file.extend([ - '', - '%' + rpm_opt,]) - if val: - with open(val) as f: - spec_file.extend(f.read().split('\n')) - else: - spec_file.append(default) - - - # files section - spec_file.extend([ - '', - '%files -f INSTALLED_FILES', - '%defattr(-,root,root)', - ]) - - if self.doc_files: - spec_file.append('%doc ' + ' '.join(self.doc_files)) - - if self.changelog: - spec_file.extend([ - '', - '%changelog',]) - spec_file.extend(self.changelog) - - return spec_file - - def _format_changelog(self, changelog): - """Format the changelog correctly and convert it to a list of strings - """ - if not changelog: - return changelog - new_changelog = [] - for line in changelog.strip().split('\n'): - line = line.strip() - if line[0] == '*': - new_changelog.extend(['', line]) - elif line[0] == '-': - new_changelog.append(line) - else: - new_changelog.append(' ' + line) - - # strip trailing newline inserted by first changelog entry - if not new_changelog[0]: - del new_changelog[0] - - return new_changelog diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_wininst.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_wininst.py deleted file mode 100755 index 0e9ddaa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/bdist_wininst.py +++ /dev/null @@ -1,377 +0,0 @@ -"""distutils.command.bdist_wininst - -Implements the Distutils 'bdist_wininst' command: create a windows installer -exe-program.""" - -import os -import sys -import warnings -from distutils.core import Command -from distutils.util import get_platform -from distutils.dir_util import remove_tree -from distutils.errors import * -from distutils.sysconfig import get_python_version -from distutils import log - -class bdist_wininst(Command): - - description = "create an executable installer for MS Windows" - - user_options = [('bdist-dir=', None, - "temporary directory for creating the distribution"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform()), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('target-version=', None, - "require a specific python version" + - " on the target system"), - ('no-target-compile', 'c', - "do not compile .py to .pyc on the target system"), - ('no-target-optimize', 'o', - "do not compile .py to .pyo (optimized) " - "on the target system"), - 
('dist-dir=', 'd', - "directory to put final built distributions in"), - ('bitmap=', 'b', - "bitmap to use for the installer instead of python-powered logo"), - ('title=', 't', - "title to display on the installer background instead of default"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ('install-script=', None, - "basename of installation script to be run after " - "installation or before deinstallation"), - ('pre-install-script=', None, - "Fully qualified filename of a script to be run before " - "any files are installed. This script need not be in the " - "distribution"), - ('user-access-control=', None, - "specify Vista's UAC handling - 'none'/default=no " - "handling, 'auto'=use UAC if target Python installed for " - "all users, 'force'=always use UAC"), - ] - - boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize', - 'skip-build'] - - # bpo-10945: bdist_wininst requires mbcs encoding only available on Windows - _unsupported = (sys.platform != "win32") - - def __init__(self, *args, **kw): - super().__init__(*args, **kw) - warnings.warn("bdist_wininst command is deprecated since Python 3.8, " - "use bdist_wheel (wheel packages) instead", - DeprecationWarning, 2) - - def initialize_options(self): - self.bdist_dir = None - self.plat_name = None - self.keep_temp = 0 - self.no_target_compile = 0 - self.no_target_optimize = 0 - self.target_version = None - self.dist_dir = None - self.bitmap = None - self.title = None - self.skip_build = None - self.install_script = None - self.pre_install_script = None - self.user_access_control = None - - - def finalize_options(self): - self.set_undefined_options('bdist', ('skip_build', 'skip_build')) - - if self.bdist_dir is None: - if self.skip_build and self.plat_name: - # If build is skipped and plat_name is overridden, bdist will - # not see the correct 'plat_name' - so set that up manually. 
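Note that bdist_wininst warns at construction rather than at run time, so even option parsing surfaces the deprecation. A minimal sketch of that pattern, using a hypothetical command class:

import warnings

class LegacyCommand:
    def __init__(self, *args, **kw):
        # stacklevel 2 attributes the warning to the instantiating caller
        warnings.warn(
            "LegacyCommand is deprecated, use its replacement instead",
            DeprecationWarning, 2)

warnings.simplefilter("default")  # make DeprecationWarning visible
LegacyCommand()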
- bdist = self.distribution.get_command_obj('bdist') - bdist.plat_name = self.plat_name - # next the command will be initialized using that name - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'wininst') - - if not self.target_version: - self.target_version = "" - - if not self.skip_build and self.distribution.has_ext_modules(): - short_version = get_python_version() - if self.target_version and self.target_version != short_version: - raise DistutilsOptionError( - "target version can only be %s, or the '--skip-build'" \ - " option must be specified" % (short_version,)) - self.target_version = short_version - - self.set_undefined_options('bdist', - ('dist_dir', 'dist_dir'), - ('plat_name', 'plat_name'), - ) - - if self.install_script: - for script in self.distribution.scripts: - if self.install_script == os.path.basename(script): - break - else: - raise DistutilsOptionError( - "install_script '%s' not found in scripts" - % self.install_script) - - def run(self): - if (sys.platform != "win32" and - (self.distribution.has_ext_modules() or - self.distribution.has_c_libraries())): - raise DistutilsPlatformError \ - ("distribution contains extensions and/or C libraries; " - "must be compiled on a Windows 32 platform") - - if not self.skip_build: - self.run_command('build') - - install = self.reinitialize_command('install', reinit_subcommands=1) - install.root = self.bdist_dir - install.skip_build = self.skip_build - install.warn_dir = 0 - install.plat_name = self.plat_name - - install_lib = self.reinitialize_command('install_lib') - # we do not want to include pyc or pyo files - install_lib.compile = 0 - install_lib.optimize = 0 - - if self.distribution.has_ext_modules(): - # If we are building an installer for a Python version other - # than the one we are currently running, then we need to ensure - # our build_lib reflects the other Python version rather than ours. - # Note that for target_version!=sys.version, we must have skipped the - # build step, so there is no issue with enforcing the build of this - # version. - target_version = self.target_version - if not target_version: - assert self.skip_build, "Should have already checked this" - target_version = '%d.%d' % sys.version_info[:2] - plat_specifier = ".%s-%s" % (self.plat_name, target_version) - build = self.get_finalized_command('build') - build.build_lib = os.path.join(build.build_base, - 'lib' + plat_specifier) - - # Use a custom scheme for the zip-file, because we have to decide - # at installation time which scheme to use. - for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'): - value = key.upper() - if key == 'headers': - value = value + '/Include/$dist_name' - setattr(install, - 'install_' + key, - value) - - log.info("installing to %s", self.bdist_dir) - install.ensure_finalized() - - # avoid warning of 'install_lib' about installing - # into a directory not in sys.path - sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB')) - - install.run() - - del sys.path[0] - - # And make an archive relative to the root of the - # pseudo-installation tree. 
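The scheme loop above points every install_* option at an uppercase placeholder directory inside the archive; the installer exe then maps those placeholders onto the target machine's real layout at install time. Roughly:

def wininst_scheme():
    # sketch of the placeholder scheme set up in bdist_wininst.run()
    scheme = {}
    for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
        value = key.upper()
        if key == 'headers':
            value += '/Include/$dist_name'  # expanded by the installer
        scheme['install_' + key] = value
    return scheme

print(wininst_scheme()['install_headers'])  # HEADERS/Include/$dist_name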
- from tempfile import mktemp - archive_basename = mktemp() - fullname = self.distribution.get_fullname() - arcname = self.make_archive(archive_basename, "zip", - root_dir=self.bdist_dir) - # create an exe containing the zip-file - self.create_exe(arcname, fullname, self.bitmap) - if self.distribution.has_ext_modules(): - pyversion = get_python_version() - else: - pyversion = 'any' - self.distribution.dist_files.append(('bdist_wininst', pyversion, - self.get_installer_filename(fullname))) - # remove the zip-file again - log.debug("removing temporary file '%s'", arcname) - os.remove(arcname) - - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) - - def get_inidata(self): - # Return data describing the installation. - lines = [] - metadata = self.distribution.metadata - - # Write the [metadata] section. - lines.append("[metadata]") - - # 'info' will be displayed in the installer's dialog box, - # describing the items to be installed. - info = (metadata.long_description or '') + '\n' - - # Escape newline characters - def escape(s): - return s.replace("\n", "\\n") - - for name in ["author", "author_email", "description", "maintainer", - "maintainer_email", "name", "url", "version"]: - data = getattr(metadata, name, "") - if data: - info = info + ("\n %s: %s" % \ - (name.capitalize(), escape(data))) - lines.append("%s=%s" % (name, escape(data))) - - # The [setup] section contains entries controlling - # the installer runtime. - lines.append("\n[Setup]") - if self.install_script: - lines.append("install_script=%s" % self.install_script) - lines.append("info=%s" % escape(info)) - lines.append("target_compile=%d" % (not self.no_target_compile)) - lines.append("target_optimize=%d" % (not self.no_target_optimize)) - if self.target_version: - lines.append("target_version=%s" % self.target_version) - if self.user_access_control: - lines.append("user_access_control=%s" % self.user_access_control) - - title = self.title or self.distribution.get_fullname() - lines.append("title=%s" % escape(title)) - import time - import distutils - build_info = "Built %s with distutils-%s" % \ - (time.ctime(time.time()), distutils.__version__) - lines.append("build_info=%s" % build_info) - return "\n".join(lines) - - def create_exe(self, arcname, fullname, bitmap=None): - import struct - - self.mkpath(self.dist_dir) - - cfgdata = self.get_inidata() - - installer_name = self.get_installer_filename(fullname) - self.announce("creating %s" % installer_name) - - if bitmap: - with open(bitmap, "rb") as f: - bitmapdata = f.read() - bitmaplen = len(bitmapdata) - else: - bitmaplen = 0 - - with open(installer_name, "wb") as file: - file.write(self.get_exe_bytes()) - if bitmap: - file.write(bitmapdata) - - # Convert cfgdata from unicode to ascii, mbcs encoded - if isinstance(cfgdata, str): - cfgdata = cfgdata.encode("mbcs") - - # Append the pre-install script - cfgdata = cfgdata + b"\0" - if self.pre_install_script: - # We need to normalize newlines, so we open in text mode and - # convert back to bytes. "latin-1" simply avoids any possible - # failures. - with open(self.pre_install_script, "r", - encoding="latin-1") as script: - script_data = script.read().encode("latin-1") - cfgdata = cfgdata + script_data + b"\n\0" - else: - # empty pre-install script - cfgdata = cfgdata + b"\0" - file.write(cfgdata) - - # The 'magic number' 0x1234567B is used to make sure that the - # binary layout of 'cfgdata' is what the wininst.exe binary - # expects. 
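A condensed sketch of the ini block that get_inidata() assembles, using a plain dict in place of the distribution metadata object and only a few of the fields (both simplifications are assumptions made for brevity):

def escape(s):
    return s.replace("\n", "\\n")  # keep multi-line values ini-safe

def inidata(metadata, install_script=None):
    lines = ["[metadata]"]
    info = (metadata.get("long_description") or "") + "\n"
    for name in ("author", "name", "url", "version"):
        data = metadata.get(name, "")
        if data:
            info += "\n    %s: %s" % (name.capitalize(), escape(data))
            lines.append("%s=%s" % (name, escape(data)))
    lines.append("\n[Setup]")
    if install_script:
        lines.append("install_script=%s" % install_script)
    lines.append("info=%s" % escape(info))
    return "\n".join(lines)

print(inidata({"name": "demo", "version": "1.0",
               "long_description": "Two\nlines"}))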
If the layout changes, increment that number, make - # the corresponding changes to the wininst.exe sources, and - # recompile them. - header = struct.pack("' under the base build directory. We only use one of - # them for a given distribution, though -- - if self.build_purelib is None: - self.build_purelib = os.path.join(self.build_base, 'lib') - if self.build_platlib is None: - self.build_platlib = os.path.join(self.build_base, - 'lib' + plat_specifier) - - # 'build_lib' is the actual directory that we will use for this - # particular module distribution -- if user didn't supply it, pick - # one of 'build_purelib' or 'build_platlib'. - if self.build_lib is None: - if self.distribution.has_ext_modules(): - self.build_lib = self.build_platlib - else: - self.build_lib = self.build_purelib - - # 'build_temp' -- temporary directory for compiler turds, - # "build/temp." - if self.build_temp is None: - self.build_temp = os.path.join(self.build_base, - 'temp' + plat_specifier) - if self.build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts-%d.%d' % sys.version_info[:2]) - - if self.executable is None and sys.executable: - self.executable = os.path.normpath(sys.executable) - - if isinstance(self.parallel, str): - try: - self.parallel = int(self.parallel) - except ValueError: - raise DistutilsOptionError("parallel should be an integer") - - def run(self): - # Run all relevant sub-commands. This will be some subset of: - # - build_py - pure Python modules - # - build_clib - standalone C libraries - # - build_ext - Python extensions - # - build_scripts - (Python) scripts - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - - # -- Predicates for the sub-command list --------------------------- - - def has_pure_modules(self): - return self.distribution.has_pure_modules() - - def has_c_libraries(self): - return self.distribution.has_c_libraries() - - def has_ext_modules(self): - return self.distribution.has_ext_modules() - - def has_scripts(self): - return self.distribution.has_scripts() - - - sub_commands = [('build_py', has_pure_modules), - ('build_clib', has_c_libraries), - ('build_ext', has_ext_modules), - ('build_scripts', has_scripts), - ] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_clib.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_clib.py deleted file mode 100755 index 3e20ef2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_clib.py +++ /dev/null @@ -1,209 +0,0 @@ -"""distutils.command.build_clib - -Implements the Distutils 'build_clib' command, to build a C/C++ library -that is included in the module distribution and needed by an extension -module.""" - - -# XXX this module has *lots* of code ripped-off quite transparently from -# build_ext.py -- not surprisingly really, as the work required to build -# a static library from a collection of C source files is not really all -# that different from what's required to build a shared object file from -# a collection of C source files. Nevertheless, I haven't done the -# necessary refactoring to account for the overlap in code between the -# two modules, mainly because a number of subtle details changed in the -# cut 'n paste. Sigh. 
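build.finalize_options() above derives the whole build tree from build_base plus a platform specifier; the specifier itself is computed earlier in the method (outside this hunk), so the '.<platform>-<major>.<minor>' form below is an assumption inferred from the visible 'lib' + plat_specifier and 'temp' + plat_specifier names:

import os
import sys
from sysconfig import get_platform

def build_dirs(build_base="build", has_ext_modules=False):
    plat_specifier = ".%s-%d.%d" % (get_platform(), *sys.version_info[:2])
    build_purelib = os.path.join(build_base, 'lib')
    build_platlib = os.path.join(build_base, 'lib' + plat_specifier)
    return {
        'purelib': build_purelib,
        'platlib': build_platlib,
        # build_lib picks whichever of the two actually applies
        'lib': build_platlib if has_ext_modules else build_purelib,
        'temp': os.path.join(build_base, 'temp' + plat_specifier),
        'scripts': os.path.join(build_base,
                                'scripts-%d.%d' % sys.version_info[:2]),
    }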
- -import os -from distutils.core import Command -from distutils.errors import * -from distutils.sysconfig import customize_compiler -from distutils import log - -def show_compilers(): - from distutils.ccompiler import show_compilers - show_compilers() - - -class build_clib(Command): - - description = "build C/C++ libraries used by Python extensions" - - user_options = [ - ('build-clib=', 'b', - "directory to build C/C++ libraries to"), - ('build-temp=', 't', - "directory to put temporary build by-products"), - ('debug', 'g', - "compile with debugging information"), - ('force', 'f', - "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', - "specify the compiler type"), - ] - - boolean_options = ['debug', 'force'] - - help_options = [ - ('help-compiler', None, - "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.build_clib = None - self.build_temp = None - - # List of libraries to build - self.libraries = None - - # Compilation options for all libraries - self.include_dirs = None - self.define = None - self.undef = None - self.debug = None - self.force = 0 - self.compiler = None - - - def finalize_options(self): - # This might be confusing: both build-clib and build-temp default - # to build-temp as defined by the "build" command. This is because - # I think that C libraries are really just temporary build - # by-products, at least from the point of view of building Python - # extensions -- but I want to keep my options open. - self.set_undefined_options('build', - ('build_temp', 'build_clib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force')) - - self.libraries = self.distribution.libraries - if self.libraries: - self.check_library_list(self.libraries) - - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - # XXX same as for build_ext -- what about 'self.define' and - # 'self.undef' ? - - - def run(self): - if not self.libraries: - return - - # Yech -- this is cut 'n pasted from build_ext.py! - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - customize_compiler(self.compiler) - - if self.include_dirs is not None: - self.compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name,value) in self.define: - self.compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - self.compiler.undefine_macro(macro) - - self.build_libraries(self.libraries) - - - def check_library_list(self, libraries): - """Ensure that the list of libraries is valid. - - `library` is presumably provided as a command option 'libraries'. - This method checks that it is a list of 2-tuples, where the tuples - are (library_name, build_info_dict). - - Raise DistutilsSetupError if the structure is invalid anywhere; - just returns otherwise. 
- """ - if not isinstance(libraries, list): - raise DistutilsSetupError( - "'libraries' option must be a list of tuples") - - for lib in libraries: - if not isinstance(lib, tuple) and len(lib) != 2: - raise DistutilsSetupError( - "each element of 'libraries' must a 2-tuple") - - name, build_info = lib - - if not isinstance(name, str): - raise DistutilsSetupError( - "first element of each tuple in 'libraries' " - "must be a string (the library name)") - - if '/' in name or (os.sep != '/' and os.sep in name): - raise DistutilsSetupError("bad library name '%s': " - "may not contain directory separators" % lib[0]) - - if not isinstance(build_info, dict): - raise DistutilsSetupError( - "second element of each tuple in 'libraries' " - "must be a dictionary (build info)") - - - def get_library_names(self): - # Assume the library list is valid -- 'check_library_list()' is - # called from 'finalize_options()', so it should be! - if not self.libraries: - return None - - lib_names = [] - for (lib_name, build_info) in self.libraries: - lib_names.append(lib_name) - return lib_names - - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for (lib_name, build_info) in self.libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name) - - filenames.extend(sources) - return filenames - - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name) - sources = list(sources) - - log.info("building '%s' library", lib_name) - - # First, compile the source code to object files in the library - # directory. (This should probably change to putting object - # files in a temporary build directory.) - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - objects = self.compiler.compile(sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug) - - # Now "link" the object files together into a static library. - # (On Unix at least, this isn't really linking -- it just - # builds an archive. Whatever.) 
- self.compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_ext.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_ext.py deleted file mode 100755 index 181671b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_ext.py +++ /dev/null @@ -1,755 +0,0 @@ -"""distutils.command.build_ext - -Implements the Distutils 'build_ext' command, for building extension -modules (currently limited to C extensions, should accommodate C++ -extensions ASAP).""" - -import contextlib -import os -import re -import sys -from distutils.core import Command -from distutils.errors import * -from distutils.sysconfig import customize_compiler, get_python_version -from distutils.sysconfig import get_config_h_filename -from distutils.dep_util import newer_group -from distutils.extension import Extension -from distutils.util import get_platform -from distutils import log -from . import py37compat - -from site import USER_BASE - -# An extension name is just a dot-separated list of Python NAMEs (ie. -# the same as a fully-qualified module name). -extension_name_re = re.compile \ - (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$') - - -def show_compilers (): - from distutils.ccompiler import show_compilers - show_compilers() - - -class build_ext(Command): - - description = "build C/C++ extensions (compile/link to build directory)" - - # XXX thoughts on how to deal with complex command-line options like - # these, i.e. how to make it so fancy_getopt can suck them off the - # command line and make it look like setup.py defined the appropriate - # lists of tuples of what-have-you. - # - each command needs a callback to process its command-line options - # - Command.__init__() needs access to its share of the whole - # command line (must ultimately come from - # Distribution.parse_command_line()) - # - it then calls the current command class' option-parsing - # callback to deal with weird options like -D, which have to - # parse the option text and churn out some custom data - # structure - # - that data structure (in this case, a list of 2-tuples) - # will then be present in the command object by the time - # we get to finalize_options() (i.e. 
the constructor - # takes care of both command-line and client options - # in between initialize_options() and finalize_options()) - - sep_by = " (separated by '%s')" % os.pathsep - user_options = [ - ('build-lib=', 'b', - "directory for compiled extension modules"), - ('build-temp=', 't', - "directory for temporary files (build by-products)"), - ('plat-name=', 'p', - "platform name to cross-compile for, if supported " - "(default: %s)" % get_platform()), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory alongside your pure Python modules"), - ('include-dirs=', 'I', - "list of directories to search for header files" + sep_by), - ('define=', 'D', - "C preprocessor macros to define"), - ('undef=', 'U', - "C preprocessor macros to undefine"), - ('libraries=', 'l', - "external C libraries to link with"), - ('library-dirs=', 'L', - "directories to search for external C libraries" + sep_by), - ('rpath=', 'R', - "directories to search for shared C libraries at runtime"), - ('link-objects=', 'O', - "extra explicit link objects to include in the link"), - ('debug', 'g', - "compile/link with debugging information"), - ('force', 'f', - "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', - "specify the compiler type"), - ('parallel=', 'j', - "number of parallel build jobs"), - ('swig-cpp', None, - "make SWIG create C++ files (default is C)"), - ('swig-opts=', None, - "list of SWIG command line options"), - ('swig=', None, - "path to the SWIG executable"), - ('user', None, - "add user include, library and rpath") - ] - - boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user'] - - help_options = [ - ('help-compiler', None, - "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.extensions = None - self.build_lib = None - self.plat_name = None - self.build_temp = None - self.inplace = 0 - self.package = None - - self.include_dirs = None - self.define = None - self.undef = None - self.libraries = None - self.library_dirs = None - self.rpath = None - self.link_objects = None - self.debug = None - self.force = None - self.compiler = None - self.swig = None - self.swig_cpp = None - self.swig_opts = None - self.user = None - self.parallel = None - - def finalize_options(self): - from distutils import sysconfig - - self.set_undefined_options('build', - ('build_lib', 'build_lib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force'), - ('parallel', 'parallel'), - ('plat_name', 'plat_name'), - ) - - if self.package is None: - self.package = self.distribution.ext_package - - self.extensions = self.distribution.ext_modules - - # Make sure Python's include directories (for Python.h, pyconfig.h, - # etc.) are in the include search path. - py_include = sysconfig.get_python_inc() - plat_py_include = sysconfig.get_python_inc(plat_specific=1) - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - # If in a virtualenv, add its include directory - # Issue 16116 - if sys.exec_prefix != sys.base_exec_prefix: - self.include_dirs.append(os.path.join(sys.exec_prefix, 'include')) - - # Put the Python "system" include dir at the end, so that - # any local include dirs take precedence. 
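The naming rule enforced later by check_extensions_list() is exactly the extension_name_re defined above: a dot-separated chain of Python identifiers. For instance:

import re

extension_name_re = re.compile(
    r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')

print(bool(extension_name_re.match('foo.bar')))   # True
print(bool(extension_name_re.match('foo-bar')))   # False: '-' not allowed
print(bool(extension_name_re.match('3foo.bar')))  # False: leading digit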
- self.include_dirs.extend(py_include.split(os.path.pathsep)) - if plat_py_include != py_include: - self.include_dirs.extend( - plat_py_include.split(os.path.pathsep)) - - self.ensure_string_list('libraries') - self.ensure_string_list('link_objects') - - # Life is easier if we're not forever checking for None, so - # simplify these options to empty lists if unset - if self.libraries is None: - self.libraries = [] - if self.library_dirs is None: - self.library_dirs = [] - elif isinstance(self.library_dirs, str): - self.library_dirs = self.library_dirs.split(os.pathsep) - - if self.rpath is None: - self.rpath = [] - elif isinstance(self.rpath, str): - self.rpath = self.rpath.split(os.pathsep) - - # for extensions under windows use different directories - # for Release and Debug builds. - # also Python's library directory must be appended to library_dirs - if os.name == 'nt': - # the 'libs' directory is for binary installs - we assume that - # must be the *native* platform. But we don't really support - # cross-compiling via a binary install anyway, so we let it go. - self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) - if sys.base_exec_prefix != sys.prefix: # Issue 16116 - self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs')) - if self.debug: - self.build_temp = os.path.join(self.build_temp, "Debug") - else: - self.build_temp = os.path.join(self.build_temp, "Release") - - # Append the source distribution include and library directories, - # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.dirname(get_config_h_filename())) - self.library_dirs.append(sys.base_exec_prefix) - - # Use the .lib files for the correct architecture - if self.plat_name == 'win32': - suffix = 'win32' - else: - # win-amd64 - suffix = self.plat_name[4:] - new_lib = os.path.join(sys.exec_prefix, 'PCbuild') - if suffix: - new_lib = os.path.join(new_lib, suffix) - self.library_dirs.append(new_lib) - - # For extensions under Cygwin, Python's library directory must be - # appended to library_dirs - if sys.platform[:6] == 'cygwin': - if not sysconfig.python_build: - # building third party extensions - self.library_dirs.append(os.path.join(sys.prefix, "lib", - "python" + get_python_version(), - "config")) - else: - # building python standard extensions - self.library_dirs.append('.') - - # For building extensions with a shared Python library, - # Python's library directory must be appended to library_dirs - # See Issues: #1600860, #4366 - if (sysconfig.get_config_var('Py_ENABLE_SHARED')): - if not sysconfig.python_build: - # building third party extensions - self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) - else: - # building python standard extensions - self.library_dirs.append('.') - - # The argument parsing will result in self.define being a string, but - # it has to be a list of 2-tuples. All the preprocessor symbols - # specified by the 'define' option will be set to '1'. Multiple - # symbols can be separated with commas. - - if self.define: - defines = self.define.split(',') - self.define = [(symbol, '1') for symbol in defines] - - # The option for macros to undefine is also a string from the - # option parsing, but has to be a list. Multiple symbols can also - # be separated with commas here. 
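As the comments above note, the -D and -U values arrive from option parsing as comma-separated strings and are normalized here; what that parsing yields:

def parse_define(opt):
    # every defined symbol gets the value '1'
    return [(symbol, '1') for symbol in opt.split(',')]

def parse_undef(opt):
    return opt.split(',')

print(parse_define('HAVE_FOO,USE_BAR'))  # [('HAVE_FOO', '1'), ('USE_BAR', '1')]
print(parse_undef('NDEBUG,TRACE'))       # ['NDEBUG', 'TRACE']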
- if self.undef: - self.undef = self.undef.split(',') - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = self.swig_opts.split(' ') - - # Finally add the user include and library directories if requested - if self.user: - user_include = os.path.join(USER_BASE, "include") - user_lib = os.path.join(USER_BASE, "lib") - if os.path.isdir(user_include): - self.include_dirs.append(user_include) - if os.path.isdir(user_lib): - self.library_dirs.append(user_lib) - self.rpath.append(user_lib) - - if isinstance(self.parallel, str): - try: - self.parallel = int(self.parallel) - except ValueError: - raise DistutilsOptionError("parallel should be an integer") - - def run(self): - from distutils.ccompiler import new_compiler - - # 'self.extensions', as supplied by setup.py, is a list of - # Extension instances. See the documentation for Extension (in - # distutils.extension) for details. - # - # For backwards compatibility with Distutils 0.8.2 and earlier, we - # also allow the 'extensions' list to be a list of tuples: - # (ext_name, build_info) - # where build_info is a dictionary containing everything that - # Extension instances do except the name, with a few things being - # differently named. We convert these 2-tuples to Extension - # instances as needed. - - if not self.extensions: - return - - # If we were asked to build any C/C++ libraries, make sure that the - # directory where we put them is in the library search path for - # linking extensions. - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.libraries.extend(build_clib.get_library_names() or []) - self.library_dirs.append(build_clib.build_clib) - - # Setup the CCompiler object that we'll use to do all the - # compiling and linking - self.compiler = new_compiler(compiler=self.compiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - customize_compiler(self.compiler) - # If we are cross-compiling, init the compiler now (if we are not - # cross-compiling, init would not hurt, but people may rely on - # late initialization of compiler even if they shouldn't...) - if os.name == 'nt' and self.plat_name != get_platform(): - self.compiler.initialize(self.plat_name) - - # And make sure that any compile/link-related options (which might - # come from the command-line or from the setup script) are set in - # that CCompiler object -- that way, they automatically apply to - # all compiling and linking done here. - if self.include_dirs is not None: - self.compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name, value) in self.define: - self.compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - self.compiler.undefine_macro(macro) - if self.libraries is not None: - self.compiler.set_libraries(self.libraries) - if self.library_dirs is not None: - self.compiler.set_library_dirs(self.library_dirs) - if self.rpath is not None: - self.compiler.set_runtime_library_dirs(self.rpath) - if self.link_objects is not None: - self.compiler.set_link_objects(self.link_objects) - - # Now actually compile and link everything. - self.build_extensions() - - def check_extensions_list(self, extensions): - """Ensure that the list of extensions (presumably provided as a - command option 'extensions') is valid, i.e. it is a list of - Extension objects. 
We also support the old-style list of 2-tuples, - where the tuples are (ext_name, build_info), which are converted to - Extension instances here. - - Raise DistutilsSetupError if the structure is invalid anywhere; - just returns otherwise. - """ - if not isinstance(extensions, list): - raise DistutilsSetupError( - "'ext_modules' option must be a list of Extension instances") - - for i, ext in enumerate(extensions): - if isinstance(ext, Extension): - continue # OK! (assume type-checking done - # by Extension constructor) - - if not isinstance(ext, tuple) or len(ext) != 2: - raise DistutilsSetupError( - "each element of 'ext_modules' option must be an " - "Extension instance or 2-tuple") - - ext_name, build_info = ext - - log.warn("old-style (ext_name, build_info) tuple found in " - "ext_modules for extension '%s' " - "-- please convert to Extension instance", ext_name) - - if not (isinstance(ext_name, str) and - extension_name_re.match(ext_name)): - raise DistutilsSetupError( - "first element of each tuple in 'ext_modules' " - "must be the extension name (a string)") - - if not isinstance(build_info, dict): - raise DistutilsSetupError( - "second element of each tuple in 'ext_modules' " - "must be a dictionary (build info)") - - # OK, the (ext_name, build_info) dict is type-safe: convert it - # to an Extension instance. - ext = Extension(ext_name, build_info['sources']) - - # Easy stuff: one-to-one mapping from dict elements to - # instance attributes. - for key in ('include_dirs', 'library_dirs', 'libraries', - 'extra_objects', 'extra_compile_args', - 'extra_link_args'): - val = build_info.get(key) - if val is not None: - setattr(ext, key, val) - - # Medium-easy stuff: same syntax/semantics, different names. - ext.runtime_library_dirs = build_info.get('rpath') - if 'def_file' in build_info: - log.warn("'def_file' element of build info dict " - "no longer supported") - - # Non-trivial stuff: 'macros' split into 'define_macros' - # and 'undef_macros'. - macros = build_info.get('macros') - if macros: - ext.define_macros = [] - ext.undef_macros = [] - for macro in macros: - if not (isinstance(macro, tuple) and len(macro) in (1, 2)): - raise DistutilsSetupError( - "'macros' element of build info dict " - "must be 1- or 2-tuple") - if len(macro) == 1: - ext.undef_macros.append(macro[0]) - elif len(macro) == 2: - ext.define_macros.append(macro) - - extensions[i] = ext - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - - # Wouldn't it be neat if we knew the names of header files too... - for ext in self.extensions: - filenames.extend(ext.sources) - return filenames - - def get_outputs(self): - # Sanity check the 'extensions' list -- can't assume this is being - # done in the same run as a 'build_extensions()' call (in fact, we - # can probably assume that it *isn't*!). - self.check_extensions_list(self.extensions) - - # And build the list of output (built) filenames. Note that this - # ignores the 'inplace' flag, and assumes everything goes in the - # "build" tree. 
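A worked example of the old-style 2-tuple that check_extensions_list() above converts, next to its Extension equivalent (extension name and paths are hypothetical):

from distutils.extension import Extension

old_style = ('pkg.fast', {
    'sources': ['pkg/fast.c'],
    'include_dirs': ['include'],
    'macros': [('WITH_FOO', '1'), ('NDEBUG',)],  # 2-tuple defines, 1-tuple undefines
})

name, build_info = old_style
ext = Extension(name, build_info['sources'],
                include_dirs=build_info.get('include_dirs'))
for macro in build_info.get('macros', []):
    if len(macro) == 1:
        ext.undef_macros.append(macro[0])
    else:
        ext.define_macros.append(macro)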
- outputs = [] - for ext in self.extensions: - outputs.append(self.get_ext_fullpath(ext.name)) - return outputs - - def build_extensions(self): - # First, sanity-check the 'extensions' list - self.check_extensions_list(self.extensions) - if self.parallel: - self._build_extensions_parallel() - else: - self._build_extensions_serial() - - def _build_extensions_parallel(self): - workers = self.parallel - if self.parallel is True: - workers = os.cpu_count() # may return None - try: - from concurrent.futures import ThreadPoolExecutor - except ImportError: - workers = None - - if workers is None: - self._build_extensions_serial() - return - - with ThreadPoolExecutor(max_workers=workers) as executor: - futures = [executor.submit(self.build_extension, ext) - for ext in self.extensions] - for ext, fut in zip(self.extensions, futures): - with self._filter_build_errors(ext): - fut.result() - - def _build_extensions_serial(self): - for ext in self.extensions: - with self._filter_build_errors(ext): - self.build_extension(ext) - - @contextlib.contextmanager - def _filter_build_errors(self, ext): - try: - yield - except (CCompilerError, DistutilsError, CompileError) as e: - if not ext.optional: - raise - self.warn('building extension "%s" failed: %s' % - (ext.name, e)) - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'ext_modules' option (extension '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % ext.name) - # sort to make the resulting .so file build reproducible - sources = sorted(sources) - - ext_path = self.get_ext_fullpath(ext.name) - depends = sources + ext.depends - if not (self.force or newer_group(depends, ext_path, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - # First, scan the sources for SWIG definition files (.i), run - # SWIG on 'em to create .c files, and modify the sources list - # accordingly. - sources = self.swig_sources(sources, ext) - - # Next, compile the source code to object files. - - # XXX not honouring 'define_macros' or 'undef_macros' -- the - # CCompiler API needs to change to accommodate this, and I - # want to do one thing at a time! - - # Two possible sources for extra compiler arguments: - # - 'extra_compile_args' in Extension object - # - CFLAGS environment variable (not particularly - # elegant, but people seem to expect it and I - # guess it's useful) - # The environment variable should take precedence, and - # any sensible compiler will give precedence to later - # command line args. Hence we combine them in order: - extra_args = ext.extra_compile_args or [] - - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - objects = self.compiler.compile(sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=ext.include_dirs, - debug=self.debug, - extra_postargs=extra_args, - depends=ext.depends) - - # XXX outdated variable, kept here in case third-part code - # needs it. - self._built_objects = objects[:] - - # Now link the object files together into a "shared object" -- - # of course, first we have to figure out all the other things - # that go into the mix. 
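The parallel path above is the standard submit-then-collect pattern; a generic sketch under the same assumptions (thread workers, cpu_count() possibly None, errors re-raised per item):

import os
from concurrent.futures import ThreadPoolExecutor

def build_all(items, build_one, jobs=None):
    workers = jobs or os.cpu_count()  # cpu_count() may return None
    if workers is None:
        for item in items:            # serial fallback, as in the code above
            build_one(item)
        return
    with ThreadPoolExecutor(max_workers=workers) as executor:
        futures = [executor.submit(build_one, item) for item in items]
        for item, fut in zip(items, futures):
            fut.result()              # re-raises that item's build error here

build_all(['ext_a', 'ext_b'], print)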
- if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - - # Detect target language, if not provided - language = ext.language or self.compiler.detect_language(sources) - - self.compiler.link_shared_object( - objects, ext_path, - libraries=self.get_libraries(ext), - library_dirs=ext.library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=language) - - def swig_sources(self, sources, extension): - """Walk the list of source files in 'sources', looking for SWIG - interface (.i) files. Run SWIG on all that are found, and - return a modified 'sources' list with SWIG source files replaced - by the generated C (or C++) files. - """ - new_sources = [] - swig_sources = [] - swig_targets = {} - - # XXX this drops generated C/C++ files into the source tree, which - # is fine for developers who want to distribute the generated - # source -- but there should be an option to put SWIG output in - # the temp dir. - - if self.swig_cpp: - log.warn("--swig-cpp is deprecated - use --swig-opts=-c++") - - if self.swig_cpp or ('-c++' in self.swig_opts) or \ - ('-c++' in extension.swig_opts): - target_ext = '.cpp' - else: - target_ext = '.c' - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == ".i": # SWIG interface file - new_sources.append(base + '_wrap' + target_ext) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] - swig_cmd.extend(self.swig_opts) - if self.swig_cpp: - swig_cmd.append("-c++") - - # Do not override commandline arguments - if not self.swig_opts: - for o in extension.swig_opts: - swig_cmd.append(o) - - for source in swig_sources: - target = swig_targets[source] - log.info("swigging %s to %s", source, target) - self.spawn(swig_cmd + ["-o", target, source]) - - return new_sources - - def find_swig(self): - """Return the name of the SWIG executable. On Unix, this is - just "swig" -- it should be in the PATH. Tries a bit harder on - Windows. - """ - if os.name == "posix": - return "swig" - elif os.name == "nt": - # Look for SWIG in its standard installation directory on - # Windows (or so I presume!). If we find it there, great; - # if not, act like Unix and assume it's in the PATH. - for vers in ("1.3", "1.2", "1.1"): - fn = os.path.join("c:\\swig%s" % vers, "swig.exe") - if os.path.isfile(fn): - return fn - else: - return "swig.exe" - else: - raise DistutilsPlatformError( - "I don't know how to find (much less run) SWIG " - "on platform '%s'" % os.name) - - # -- Name generators ----------------------------------------------- - # (extension names, filenames, whatever) - def get_ext_fullpath(self, ext_name): - """Returns the path of the filename for a given extension. - - The file is located in `build_lib` or directly in the package - (inplace option). 
- """ - fullname = self.get_ext_fullname(ext_name) - modpath = fullname.split('.') - filename = self.get_ext_filename(modpath[-1]) - - if not self.inplace: - # no further work needed - # returning : - # build_dir/package/path/filename - filename = os.path.join(*modpath[:-1]+[filename]) - return os.path.join(self.build_lib, filename) - - # the inplace option requires to find the package directory - # using the build_py command for that - package = '.'.join(modpath[0:-1]) - build_py = self.get_finalized_command('build_py') - package_dir = os.path.abspath(build_py.get_package_dir(package)) - - # returning - # package_dir/filename - return os.path.join(package_dir, filename) - - def get_ext_fullname(self, ext_name): - """Returns the fullname of a given extension name. - - Adds the `package.` prefix""" - if self.package is None: - return ext_name - else: - return self.package + '.' + ext_name - - def get_ext_filename(self, ext_name): - r"""Convert the name of an extension (eg. "foo.bar") into the name - of the file from which it will be loaded (eg. "foo/bar.so", or - "foo\bar.pyd"). - """ - from distutils.sysconfig import get_config_var - ext_path = ext_name.split('.') - ext_suffix = get_config_var('EXT_SUFFIX') - return os.path.join(*ext_path) + ext_suffix - - def get_export_symbols(self, ext): - """Return the list of symbols that a shared extension has to - export. This either uses 'ext.export_symbols' or, if it's not - provided, "PyInit_" + module_name. Only relevant on Windows, where - the .pyd file (DLL) must export the module "PyInit_" function. - """ - name = ext.name.split('.')[-1] - try: - # Unicode module name support as defined in PEP-489 - # https://www.python.org/dev/peps/pep-0489/#export-hook-name - name.encode('ascii') - except UnicodeEncodeError: - suffix = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii') - else: - suffix = "_" + name - - initfunc_name = "PyInit" + suffix - if initfunc_name not in ext.export_symbols: - ext.export_symbols.append(initfunc_name) - return ext.export_symbols - - def get_libraries(self, ext): - """Return the list of libraries to link against when building a - shared extension. On most platforms, this is just 'ext.libraries'; - on Windows, we add the Python library (eg. python20.dll). - """ - # The python library is always needed on Windows. For MSVC, this - # is redundant, since the library is mentioned in a pragma in - # pyconfig.h that MSVC groks. The other Windows compilers all seem - # to need it mentioned explicitly, though, so that's what we do. - # Append '_d' to the python import library on debug builds. - if sys.platform == "win32": - from distutils._msvccompiler import MSVCCompiler - if not isinstance(self.compiler, MSVCCompiler): - template = "python%d%d" - if self.debug: - template = template + '_d' - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - # don't extend ext.libraries, it may be shared with other - # extensions, it is a reference to the original list - return ext.libraries + [pythonlib] - else: - # On Android only the main executable and LD_PRELOADs are considered - # to be RTLD_GLOBAL, all the dependencies of the main executable - # remain RTLD_LOCAL and so the shared libraries must be linked with - # libpython when python is built with a shared python library (issue - # bpo-21536). - # On Cygwin (and if required, other POSIX-like platforms based on - # Windows like MinGW) it is simply necessary that all symbols in - # shared libraries are resolved at link time. 
- from distutils.sysconfig import get_config_var - link_libpython = False - if get_config_var('Py_ENABLE_SHARED'): - # A native build on an Android device or on Cygwin - if hasattr(sys, 'getandroidapilevel'): - link_libpython = True - elif sys.platform == 'cygwin': - link_libpython = True - elif '_PYTHON_HOST_PLATFORM' in os.environ: - # We are cross-compiling for one of the relevant platforms - if get_config_var('ANDROID_API_LEVEL') != 0: - link_libpython = True - elif get_config_var('MACHDEP') == 'cygwin': - link_libpython = True - - if link_libpython: - ldversion = get_config_var('LDVERSION') - return ext.libraries + ['python' + ldversion] - - return ext.libraries + py37compat.pythonlib() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_py.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_py.py deleted file mode 100755 index 7ef9bce..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_py.py +++ /dev/null @@ -1,392 +0,0 @@ -"""distutils.command.build_py - -Implements the Distutils 'build_py' command.""" - -import os -import importlib.util -import sys -import glob - -from distutils.core import Command -from distutils.errors import * -from distutils.util import convert_path -from distutils import log - -class build_py (Command): - - description = "\"build\" pure Python modules (copy to build directory)" - - user_options = [ - ('build-lib=', 'd', "directory to \"build\" (copy) to"), - ('compile', 'c', "compile .py to .pyc"), - ('no-compile', None, "don't compile .py files [default]"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ] - - boolean_options = ['compile', 'force'] - negative_opt = {'no-compile' : 'compile'} - - def initialize_options(self): - self.build_lib = None - self.py_modules = None - self.package = None - self.package_data = None - self.package_dir = None - self.compile = 0 - self.optimize = 0 - self.force = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_lib', 'build_lib'), - ('force', 'force')) - - # Get the distribution options that are aliases for build_py - # options -- list of packages and list of modules. - self.packages = self.distribution.packages - self.py_modules = self.distribution.py_modules - self.package_data = self.distribution.package_data - self.package_dir = {} - if self.distribution.package_dir: - for name, path in self.distribution.package_dir.items(): - self.package_dir[name] = convert_path(path) - self.data_files = self.get_data_files() - - # Ick, copied straight from install_lib.py (fancy_getopt needs a - # type system! Hell, *everything* needs a type system!!!) - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - assert 0 <= self.optimize <= 2 - except (ValueError, AssertionError): - raise DistutilsOptionError("optimize must be 0, 1, or 2") - - def run(self): - # XXX copy_file by default preserves atime and mtime. IMHO this is - # the right thing to do, but perhaps it should be an option -- in - # particular, a site administrator might want installed files to - # reflect the time of installation rather than the last - # modification time before the installed release. 
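The default export symbol computed by get_export_symbols() above, as a standalone function, including the PEP 489 punycode mangling for non-ASCII module names:

def default_export_symbol(ext_name):
    name = ext_name.split('.')[-1]
    try:
        name.encode('ascii')
    except UnicodeEncodeError:
        # non-ASCII names get the PyInitU_ prefix with punycode mangling
        suffix = ('U_' + name.encode('punycode')
                             .replace(b'-', b'_').decode('ascii'))
    else:
        suffix = '_' + name
    return 'PyInit' + suffix

print(default_export_symbol('pkg.fast'))  # PyInit_fast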
- - # XXX copy_file by default preserves mode, which appears to be the - # wrong thing to do: if a file is read-only in the working - # directory, we want it to be installed read/write so that the next - # installation of the same module distribution can overwrite it - # without problems. (This might be a Unix-specific issue.) Thus - # we turn off 'preserve_mode' when copying to the build directory, - # since the build directory is supposed to be exactly what the - # installation will look like (ie. we preserve mode when - # installing). - - # Two options control which modules will be installed: 'packages' - # and 'py_modules'. The former lets us work with whole packages, not - # specifying individual modules at all; the latter is for - # specifying modules one-at-a-time. - - if self.py_modules: - self.build_modules() - if self.packages: - self.build_packages() - self.build_package_data() - - self.byte_compile(self.get_outputs(include_bytecode=0)) - - def get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - data = [] - if not self.packages: - return data - for package in self.packages: - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Length of path to strip from found files - plen = 0 - if src_dir: - plen = len(src_dir)+1 - - # Strip directory from globbed filenames - filenames = [ - file[plen:] for file in self.find_data_files(package, src_dir) - ] - data.append((package, src_dir, build_dir, filenames)) - return data - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - globs = (self.package_data.get('', []) - + self.package_data.get(package, [])) - files = [] - for pattern in globs: - # Each pattern has to be converted to a platform-specific path - filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern))) - # Files that match more than one pattern are only added once - files.extend([fn for fn in filelist if fn not in files - and os.path.isfile(fn)]) - return files - - def build_package_data(self): - """Copy data files into build directory""" - lastdir = None - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - self.copy_file(os.path.join(src_dir, filename), target, - preserve_mode=False) - - def get_package_dir(self, package): - """Return the directory, relative to the top of the source - distribution, where package 'package' should be found - (at least according to the 'package_dir' option, if any).""" - path = package.split('.') - - if not self.package_dir: - if path: - return os.path.join(*path) - else: - return '' - else: - tail = [] - while path: - try: - pdir = self.package_dir['.'.join(path)] - except KeyError: - tail.insert(0, path[-1]) - del path[-1] - else: - tail.insert(0, pdir) - return os.path.join(*tail) - else: - # Oops, got all the way through 'path' without finding a - # match in package_dir. If package_dir defines a directory - # for the root (nameless) package, then fallback on it; - # otherwise, we might as well have not consulted - # package_dir at all, as we just use the directory implied - # by 'tail' (which should be the same as the original value - # of 'path' at this point). 
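find_data_files() above is a thin wrapper over glob; its essential behavior, with glob.escape() guarding against metacharacters in the package directory and duplicates added only once:

import glob
import os

def find_data_files(src_dir, patterns):
    files = []
    for pattern in patterns:
        filelist = glob.glob(os.path.join(glob.escape(src_dir), pattern))
        files.extend(fn for fn in filelist
                     if fn not in files and os.path.isfile(fn))
    return files

# e.g. find_data_files('pkg', ['*.json', 'templates/*.html'])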
- pdir = self.package_dir.get('') - if pdir is not None: - tail.insert(0, pdir) - - if tail: - return os.path.join(*tail) - else: - return '' - - def check_package(self, package, package_dir): - # Empty dir name means current directory, which we can probably - # assume exists. Also, os.path.exists and isdir don't know about - # my "empty string means current dir" convention, so we have to - # circumvent them. - if package_dir != "": - if not os.path.exists(package_dir): - raise DistutilsFileError( - "package directory '%s' does not exist" % package_dir) - if not os.path.isdir(package_dir): - raise DistutilsFileError( - "supposed package directory '%s' exists, " - "but is not a directory" % package_dir) - - # Require __init__.py for all but the "root package" - if package: - init_py = os.path.join(package_dir, "__init__.py") - if os.path.isfile(init_py): - return init_py - else: - log.warn(("package init file '%s' not found " + - "(or not a regular file)"), init_py) - - # Either not in a package at all (__init__.py not expected), or - # __init__.py doesn't exist -- so don't return the filename. - return None - - def check_module(self, module, module_file): - if not os.path.isfile(module_file): - log.warn("file %s (for module %s) not found", module_file, module) - return False - else: - return True - - def find_package_modules(self, package, package_dir): - self.check_package(package, package_dir) - module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py")) - modules = [] - setup_script = os.path.abspath(self.distribution.script_name) - - for f in module_files: - abs_f = os.path.abspath(f) - if abs_f != setup_script: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - else: - self.debug_print("excluding %s" % setup_script) - return modules - - def find_modules(self): - """Finds individually-specified Python modules, ie. those listed by - module name in 'self.py_modules'. Returns a list of tuples (package, - module_base, filename): 'package' is a tuple of the path through - package-space to the module; 'module_base' is the bare (no - packages, no dots) module name, and 'filename' is the path to the - ".py" file (relative to the distribution root) that implements the - module. - """ - # Map package names to tuples of useful info about the package: - # (package_dir, checked) - # package_dir - the directory where we'll find source files for - # this package - # checked - true if we have checked that the package directory - # is valid (exists, contains __init__.py, ... ?) - packages = {} - - # List of (package, module, filename) tuples to return - modules = [] - - # We treat modules-in-packages almost the same as toplevel modules, - # just the "package" for a toplevel is empty (either an empty - # string or empty list, depending on context). 
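# Illustration: the package_dir walk in get_package_dir() above, as a
# hypothetical free function -- trim dotted-name components from the right
# until a configured prefix matches, then fall back to the '' root entry.
import os

def resolve_package_dir(package, package_dir):
    path = package.split('.') if package else []
    tail = []
    while path:
        key = '.'.join(path)
        if key in package_dir:
            tail.insert(0, package_dir[key])
            return os.path.join(*tail)
        tail.insert(0, path.pop())
    root = package_dir.get('')
    if root is not None:
        tail.insert(0, root)
    return os.path.join(*tail) if tail else ''

# e.g. resolve_package_dir('a.b', {'a': 'src/a'}) == os.path.join('src/a', 'b')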
Differences: - # - don't check for __init__.py in directory for empty package - for module in self.py_modules: - path = module.split('.') - package = '.'.join(path[0:-1]) - module_base = path[-1] - - try: - (package_dir, checked) = packages[package] - except KeyError: - package_dir = self.get_package_dir(package) - checked = 0 - - if not checked: - init_py = self.check_package(package, package_dir) - packages[package] = (package_dir, 1) - if init_py: - modules.append((package, "__init__", init_py)) - - # XXX perhaps we should also check for just .pyc files - # (so greedy closed-source bastards can distribute Python - # modules too) - module_file = os.path.join(package_dir, module_base + ".py") - if not self.check_module(module, module_file): - continue - - modules.append((package, module_base, module_file)) - - return modules - - def find_all_modules(self): - """Compute the list of all modules that will be built, whether - they are specified one-module-at-a-time ('self.py_modules') or - by whole packages ('self.packages'). Return a list of tuples - (package, module, module_file), just like 'find_modules()' and - 'find_package_modules()' do.""" - modules = [] - if self.py_modules: - modules.extend(self.find_modules()) - if self.packages: - for package in self.packages: - package_dir = self.get_package_dir(package) - m = self.find_package_modules(package, package_dir) - modules.extend(m) - return modules - - def get_source_files(self): - return [module[-1] for module in self.find_all_modules()] - - def get_module_outfile(self, build_dir, package, module): - outfile_path = [build_dir] + list(package) + [module + ".py"] - return os.path.join(*outfile_path) - - def get_outputs(self, include_bytecode=1): - modules = self.find_all_modules() - outputs = [] - for (package, module, module_file) in modules: - package = package.split('.') - filename = self.get_module_outfile(self.build_lib, package, module) - outputs.append(filename) - if include_bytecode: - if self.compile: - outputs.append(importlib.util.cache_from_source( - filename, optimization='')) - if self.optimize > 0: - outputs.append(importlib.util.cache_from_source( - filename, optimization=self.optimize)) - - outputs += [ - os.path.join(build_dir, filename) - for package, src_dir, build_dir, filenames in self.data_files - for filename in filenames - ] - - return outputs - - def build_module(self, module, module_file, package): - if isinstance(package, str): - package = package.split('.') - elif not isinstance(package, (list, tuple)): - raise TypeError( - "'package' must be a string (dot-separated), list, or tuple") - - # Now put the module source file into the "build" area -- this is - # easy, we just copy it somewhere under self.build_lib (the build - # directory for Python source). - outfile = self.get_module_outfile(self.build_lib, package, module) - dir = os.path.dirname(outfile) - self.mkpath(dir) - return self.copy_file(module_file, outfile, preserve_mode=0) - - def build_modules(self): - modules = self.find_modules() - for (package, module, module_file) in modules: - # Now "build" the module -- ie. copy the source file to - # self.build_lib (the build directory for Python source). - # (Actually, it gets copied to the directory for this package - # under self.build_lib.) - self.build_module(module, module_file, package) - - def build_packages(self): - for package in self.packages: - # Get list of (package, module, module_file) tuples based on - # scanning the package directory. 
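# Illustration: the path arithmetic of get_module_outfile()/get_outputs()
# above (module_outputs is a hypothetical helper, not distutils API).
import importlib.util
import os

def module_outputs(build_lib, package, module, do_compile=False, optimize=0):
    parts = package.split('.') if package else []
    outfile = os.path.join(build_lib, *parts, module + '.py')
    outputs = [outfile]
    if do_compile:
        # optimization='' -> __pycache__/<module>.cpython-XY.pyc
        outputs.append(importlib.util.cache_from_source(outfile, optimization=''))
    if optimize > 0:
        # optimization=N  -> __pycache__/<module>.cpython-XY.opt-N.pyc
        outputs.append(importlib.util.cache_from_source(outfile, optimization=optimize))
    return outputs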
'package' is only included - # in the tuple so that 'find_modules()' and - # 'find_package_tuples()' have a consistent interface; it's - # ignored here (apart from a sanity check). Also, 'module' is - # the *unqualified* module name (ie. no dots, no package -- we - # already know its package!), and 'module_file' is the path to - # the .py file, relative to the current directory - # (ie. including 'package_dir'). - package_dir = self.get_package_dir(package) - modules = self.find_package_modules(package, package_dir) - - # Now loop over the modules we found, "building" each one (just - # copy it to self.build_lib). - for (package_, module, module_file) in modules: - assert package == package_ - self.build_module(module, module_file, package) - - def byte_compile(self, files): - if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - - from distutils.util import byte_compile - prefix = self.build_lib - if prefix[-1] != os.sep: - prefix = prefix + os.sep - - # XXX this code is essentially the same as the 'byte_compile() - # method of the "install_lib" command, except for the determination - # of the 'prefix' string. Hmmm. - if self.compile: - byte_compile(files, optimize=0, - force=self.force, prefix=prefix, dry_run=self.dry_run) - if self.optimize > 0: - byte_compile(files, optimize=self.optimize, - force=self.force, prefix=prefix, dry_run=self.dry_run) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_scripts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_scripts.py deleted file mode 100755 index e3312cf..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/build_scripts.py +++ /dev/null @@ -1,152 +0,0 @@ -"""distutils.command.build_scripts - -Implements the Distutils 'build_scripts' command.""" - -import os, re -from stat import ST_MODE -from distutils import sysconfig -from distutils.core import Command -from distutils.dep_util import newer -from distutils.util import convert_path -from distutils import log -import tokenize - -# check if Python is called on the first line with this expression -first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$') - -class build_scripts(Command): - - description = "\"build\" scripts (copy and fixup #! line)" - - user_options = [ - ('build-dir=', 'd', "directory to \"build\" (copy) to"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('executable=', 'e', "specify final destination interpreter path"), - ] - - boolean_options = ['force'] - - - def initialize_options(self): - self.build_dir = None - self.scripts = None - self.force = None - self.executable = None - self.outfiles = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_scripts', 'build_dir'), - ('force', 'force'), - ('executable', 'executable')) - self.scripts = self.distribution.scripts - - def get_source_files(self): - return self.scripts - - def run(self): - if not self.scripts: - return - self.copy_scripts() - - - def copy_scripts(self): - r"""Copy each script listed in 'self.scripts'; if it's marked as a - Python script in the Unix way (first line matches 'first_line_re', - ie. starts with "\#!" and contains "python"), then adjust the first - line to refer to the current Python interpreter as we copy.
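# Worked example of first_line_re above: the trailing group captures any
# interpreter options so copy_scripts() can re-append them after the
# substituted interpreter path.
import re
first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')
m = first_line_re.match(b'#!/usr/bin/python3 -u')
assert m is not None and m.group(1) == b' -u'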
- """ - self.mkpath(self.build_dir) - outfiles = [] - updated_files = [] - for script in self.scripts: - adjust = False - script = convert_path(script) - outfile = os.path.join(self.build_dir, os.path.basename(script)) - outfiles.append(outfile) - - if not self.force and not newer(script, outfile): - log.debug("not copying %s (up-to-date)", script) - continue - - # Always open the file, but ignore failures in dry-run mode -- - # that way, we'll get accurate feedback if we can read the - # script. - try: - f = open(script, "rb") - except OSError: - if not self.dry_run: - raise - f = None - else: - encoding, lines = tokenize.detect_encoding(f.readline) - f.seek(0) - first_line = f.readline() - if not first_line: - self.warn("%s is an empty file (skipping)" % script) - continue - - match = first_line_re.match(first_line) - if match: - adjust = True - post_interp = match.group(1) or b'' - - if adjust: - log.info("copying and adjusting %s -> %s", script, - self.build_dir) - updated_files.append(outfile) - if not self.dry_run: - if not sysconfig.python_build: - executable = self.executable - else: - executable = os.path.join( - sysconfig.get_config_var("BINDIR"), - "python%s%s" % (sysconfig.get_config_var("VERSION"), - sysconfig.get_config_var("EXE"))) - executable = os.fsencode(executable) - shebang = b"#!" + executable + post_interp + b"\n" - # Python parser starts to read a script using UTF-8 until - # it gets a #coding:xxx cookie. The shebang has to be the - # first line of a file, the #coding:xxx cookie cannot be - # written before. So the shebang has to be decodable from - # UTF-8. - try: - shebang.decode('utf-8') - except UnicodeDecodeError: - raise ValueError( - "The shebang ({!r}) is not decodable " - "from utf-8".format(shebang)) - # If the script is encoded to a custom encoding (use a - # #coding:xxx cookie), the shebang has to be decodable from - # the script encoding too. - try: - shebang.decode(encoding) - except UnicodeDecodeError: - raise ValueError( - "The shebang ({!r}) is not decodable " - "from the script encoding ({})" - .format(shebang, encoding)) - with open(outfile, "wb") as outf: - outf.write(shebang) - outf.writelines(f.readlines()) - if f: - f.close() - else: - if f: - f.close() - updated_files.append(outfile) - self.copy_file(script, outfile) - - if os.name == 'posix': - for file in outfiles: - if self.dry_run: - log.info("changing mode of %s", file) - else: - oldmode = os.stat(file)[ST_MODE] & 0o7777 - newmode = (oldmode | 0o555) & 0o7777 - if newmode != oldmode: - log.info("changing mode of %s from %o to %o", - file, oldmode, newmode) - os.chmod(file, newmode) - # XXX should we modify self.outfiles? - return outfiles, updated_files diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/check.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/check.py deleted file mode 100755 index 525540b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/check.py +++ /dev/null @@ -1,148 +0,0 @@ -"""distutils.command.check - -Implements the Distutils 'check' command. 
-""" -from distutils.core import Command -from distutils.errors import DistutilsSetupError - -try: - # docutils is installed - from docutils.utils import Reporter - from docutils.parsers.rst import Parser - from docutils import frontend - from docutils import nodes - - class SilentReporter(Reporter): - - def __init__(self, source, report_level, halt_level, stream=None, - debug=0, encoding='ascii', error_handler='replace'): - self.messages = [] - super().__init__(source, report_level, halt_level, stream, - debug, encoding, error_handler) - - def system_message(self, level, message, *children, **kwargs): - self.messages.append((level, message, children, kwargs)) - return nodes.system_message(message, level=level, - type=self.levels[level], - *children, **kwargs) - - HAS_DOCUTILS = True -except Exception: - # Catch all exceptions because exceptions besides ImportError probably - # indicate that docutils is not ported to Py3k. - HAS_DOCUTILS = False - -class check(Command): - """This command checks the meta-data of the package. - """ - description = ("perform some checks on the package") - user_options = [('metadata', 'm', 'Verify meta-data'), - ('restructuredtext', 'r', - ('Checks if long string meta-data syntax ' - 'are reStructuredText-compliant')), - ('strict', 's', - 'Will exit with an error if a check fails')] - - boolean_options = ['metadata', 'restructuredtext', 'strict'] - - def initialize_options(self): - """Sets default values for options.""" - self.restructuredtext = 0 - self.metadata = 1 - self.strict = 0 - self._warnings = 0 - - def finalize_options(self): - pass - - def warn(self, msg): - """Counts the number of warnings that occurs.""" - self._warnings += 1 - return Command.warn(self, msg) - - def run(self): - """Runs the command.""" - # perform the various tests - if self.metadata: - self.check_metadata() - if self.restructuredtext: - if HAS_DOCUTILS: - self.check_restructuredtext() - elif self.strict: - raise DistutilsSetupError('The docutils package is needed.') - - # let's raise an error in strict mode, if we have at least - # one warning - if self.strict and self._warnings > 0: - raise DistutilsSetupError('Please correct your package.') - - def check_metadata(self): - """Ensures that all required elements of meta-data are supplied. - - Required fields: - name, version, URL - - Recommended fields: - (author and author_email) or (maintainer and maintainer_email)) - - Warns if any are missing. 
- """ - metadata = self.distribution.metadata - - missing = [] - for attr in ('name', 'version', 'url'): - if not (hasattr(metadata, attr) and getattr(metadata, attr)): - missing.append(attr) - - if missing: - self.warn("missing required meta-data: %s" % ', '.join(missing)) - if metadata.author: - if not metadata.author_email: - self.warn("missing meta-data: if 'author' supplied, " + - "'author_email' should be supplied too") - elif metadata.maintainer: - if not metadata.maintainer_email: - self.warn("missing meta-data: if 'maintainer' supplied, " + - "'maintainer_email' should be supplied too") - else: - self.warn("missing meta-data: either (author and author_email) " + - "or (maintainer and maintainer_email) " + - "should be supplied") - - def check_restructuredtext(self): - """Checks if the long string fields are reST-compliant.""" - data = self.distribution.get_long_description() - for warning in self._check_rst_data(data): - line = warning[-1].get('line') - if line is None: - warning = warning[1] - else: - warning = '%s (line %s)' % (warning[1], line) - self.warn(warning) - - def _check_rst_data(self, data): - """Returns warnings when the provided data doesn't compile.""" - # the include and csv_table directives need this to be a path - source_path = self.distribution.script_name or 'setup.py' - parser = Parser() - settings = frontend.OptionParser(components=(Parser,)).get_default_values() - settings.tab_width = 4 - settings.pep_references = None - settings.rfc_references = None - reporter = SilentReporter(source_path, - settings.report_level, - settings.halt_level, - stream=settings.warning_stream, - debug=settings.debug, - encoding=settings.error_encoding, - error_handler=settings.error_encoding_error_handler) - - document = nodes.document(settings, reporter, source=source_path) - document.note_source(source_path, -1) - try: - parser.parse(data, document) - except AttributeError as e: - reporter.messages.append( - (-1, 'Could not finish the parsing: %s.' 
% e, '', {})) - - return reporter.messages diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/clean.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/clean.py deleted file mode 100755 index 0cb2701..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/clean.py +++ /dev/null @@ -1,76 +0,0 @@ -"""distutils.command.clean - -Implements the Distutils 'clean' command.""" - -# contributed by Bastian Kleineidam , added 2000-03-18 - -import os -from distutils.core import Command -from distutils.dir_util import remove_tree -from distutils import log - -class clean(Command): - - description = "clean up temporary files from 'build' command" - user_options = [ - ('build-base=', 'b', - "base build directory (default: 'build.build-base')"), - ('build-lib=', None, - "build directory for all modules (default: 'build.build-lib')"), - ('build-temp=', 't', - "temporary build directory (default: 'build.build-temp')"), - ('build-scripts=', None, - "build directory for scripts (default: 'build.build-scripts')"), - ('bdist-base=', None, - "temporary directory for built distributions"), - ('all', 'a', - "remove all build output, not just temporary by-products") - ] - - boolean_options = ['all'] - - def initialize_options(self): - self.build_base = None - self.build_lib = None - self.build_temp = None - self.build_scripts = None - self.bdist_base = None - self.all = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('build_scripts', 'build_scripts'), - ('build_temp', 'build_temp')) - self.set_undefined_options('bdist', - ('bdist_base', 'bdist_base')) - - def run(self): - # remove the build/temp. directory (unless it's already - # gone) - if os.path.exists(self.build_temp): - remove_tree(self.build_temp, dry_run=self.dry_run) - else: - log.debug("'%s' does not exist -- can't clean it", - self.build_temp) - - if self.all: - # remove build directories - for directory in (self.build_lib, - self.bdist_base, - self.build_scripts): - if os.path.exists(directory): - remove_tree(directory, dry_run=self.dry_run) - else: - log.warn("'%s' does not exist -- can't clean it", - directory) - - # just for the heck of it, try to remove the base build directory: - # we might have emptied it right now, but if not we don't care - if not self.dry_run: - try: - os.rmdir(self.build_base) - log.info("removing '%s'", self.build_base) - except OSError: - pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/config.py deleted file mode 100755 index aeda408..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/config.py +++ /dev/null @@ -1,344 +0,0 @@ -"""distutils.command.config - -Implements the Distutils 'config' command, a (mostly) empty command class -that exists mainly to be sub-classed by specific module distributions and -applications. The idea is that while every "config" command is different, -at least they're all named the same, and users always see "config" in the -list of standard commands. Also, this is a good place to put common -configure-like tasks: "try to compile this C code", or "figure out where -this header file lives". 
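# Illustration: a rough stand-alone equivalent of the clean command above;
# shutil.rmtree stands in for distutils' remove_tree (an assumption of
# this sketch, not what the deleted file calls).
import os
import shutil

def clean_build(dirs, build_base):
    for d in dirs:
        if os.path.exists(d):
            shutil.rmtree(d)
    try:
        os.rmdir(build_base)  # succeeds only if now empty, as above
    except OSError:
        pass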
-""" - -import os, re - -from distutils.core import Command -from distutils.errors import DistutilsExecError -from distutils.sysconfig import customize_compiler -from distutils import log - -LANG_EXT = {"c": ".c", "c++": ".cxx"} - -class config(Command): - - description = "prepare to build" - - user_options = [ - ('compiler=', None, - "specify the compiler type"), - ('cc=', None, - "specify the compiler executable"), - ('include-dirs=', 'I', - "list of directories to search for header files"), - ('define=', 'D', - "C preprocessor macros to define"), - ('undef=', 'U', - "C preprocessor macros to undefine"), - ('libraries=', 'l', - "external C libraries to link with"), - ('library-dirs=', 'L', - "directories to search for external C libraries"), - - ('noisy', None, - "show every action (compile, link, run, ...) taken"), - ('dump-source', None, - "dump generated source files before attempting to compile them"), - ] - - - # The three standard command methods: since the "config" command - # does nothing by default, these are empty. - - def initialize_options(self): - self.compiler = None - self.cc = None - self.include_dirs = None - self.libraries = None - self.library_dirs = None - - # maximal output for now - self.noisy = 1 - self.dump_source = 1 - - # list of temporary files generated along-the-way that we have - # to clean at some point - self.temp_files = [] - - def finalize_options(self): - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - elif isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - if self.libraries is None: - self.libraries = [] - elif isinstance(self.libraries, str): - self.libraries = [self.libraries] - - if self.library_dirs is None: - self.library_dirs = [] - elif isinstance(self.library_dirs, str): - self.library_dirs = self.library_dirs.split(os.pathsep) - - def run(self): - pass - - # Utility methods for actual "config" commands. The interfaces are - # loosely based on Autoconf macros of similar names. Sub-classes - # may use these freely. - - def _check_compiler(self): - """Check that 'self.compiler' really is a CCompiler object; - if not, make it one. - """ - # We do this late, and only on-demand, because this is an expensive - # import. 
- from distutils.ccompiler import CCompiler, new_compiler - if not isinstance(self.compiler, CCompiler): - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, force=1) - customize_compiler(self.compiler) - if self.include_dirs: - self.compiler.set_include_dirs(self.include_dirs) - if self.libraries: - self.compiler.set_libraries(self.libraries) - if self.library_dirs: - self.compiler.set_library_dirs(self.library_dirs) - - def _gen_temp_sourcefile(self, body, headers, lang): - filename = "_configtest" + LANG_EXT[lang] - with open(filename, "w") as file: - if headers: - for header in headers: - file.write("#include <%s>\n" % header) - file.write("\n") - file.write(body) - if body[-1] != "\n": - file.write("\n") - return filename - - def _preprocess(self, body, headers, include_dirs, lang): - src = self._gen_temp_sourcefile(body, headers, lang) - out = "_configtest.i" - self.temp_files.extend([src, out]) - self.compiler.preprocess(src, out, include_dirs=include_dirs) - return (src, out) - - def _compile(self, body, headers, include_dirs, lang): - src = self._gen_temp_sourcefile(body, headers, lang) - if self.dump_source: - dump_file(src, "compiling '%s':" % src) - (obj,) = self.compiler.object_filenames([src]) - self.temp_files.extend([src, obj]) - self.compiler.compile([src], include_dirs=include_dirs) - return (src, obj) - - def _link(self, body, headers, include_dirs, libraries, library_dirs, - lang): - (src, obj) = self._compile(body, headers, include_dirs, lang) - prog = os.path.splitext(os.path.basename(src))[0] - self.compiler.link_executable([obj], prog, - libraries=libraries, - library_dirs=library_dirs, - target_lang=lang) - - if self.compiler.exe_extension is not None: - prog = prog + self.compiler.exe_extension - self.temp_files.append(prog) - - return (src, obj, prog) - - def _clean(self, *filenames): - if not filenames: - filenames = self.temp_files - self.temp_files = [] - log.info("removing: %s", ' '.join(filenames)) - for filename in filenames: - try: - os.remove(filename) - except OSError: - pass - - - # XXX these ignore the dry-run flag: what to do, what to do? even if - # you want a dry-run build, you still need some sort of configuration - # info. My inclination is to make it up to the real config command to - # consult 'dry_run', and assume a default (minimal) configuration if - # true. The problem with trying to do it here is that you'd have to - # return either true or false from all the 'try' methods, neither of - # which is correct. - - # XXX need access to the header search path and maybe default macros. - - def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"): - """Construct a source file from 'body' (a string containing lines - of C/C++ code) and 'headers' (a list of header files to include) - and run it through the preprocessor. Return true if the - preprocessor succeeded, false if there were any errors. - ('body' probably isn't of much use, but what the heck.) - """ - from distutils.ccompiler import CompileError - self._check_compiler() - ok = True - try: - self._preprocess(body, headers, include_dirs, lang) - except CompileError: - ok = False - - self._clean() - return ok - - def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, - lang="c"): - """Construct a source file (just like 'try_cpp()'), run it through - the preprocessor, and return true if any line of the output matches - 'pattern'. 'pattern' should either be a compiled regex object or a - string containing a regex. 
If both 'body' and 'headers' are None, - preprocesses an empty file -- which can be useful to determine the - symbols the preprocessor and compiler set by default. - """ - self._check_compiler() - src, out = self._preprocess(body, headers, include_dirs, lang) - - if isinstance(pattern, str): - pattern = re.compile(pattern) - - with open(out) as file: - match = False - while True: - line = file.readline() - if line == '': - break - if pattern.search(line): - match = True - break - - self._clean() - return match - - def try_compile(self, body, headers=None, include_dirs=None, lang="c"): - """Try to compile a source file built from 'body' and 'headers'. - Return true on success, false otherwise. - """ - from distutils.ccompiler import CompileError - self._check_compiler() - try: - self._compile(body, headers, include_dirs, lang) - ok = True - except CompileError: - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - def try_link(self, body, headers=None, include_dirs=None, libraries=None, - library_dirs=None, lang="c"): - """Try to compile and link a source file, built from 'body' and - 'headers', to executable form. Return true on success, false - otherwise. - """ - from distutils.ccompiler import CompileError, LinkError - self._check_compiler() - try: - self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - ok = True - except (CompileError, LinkError): - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - def try_run(self, body, headers=None, include_dirs=None, libraries=None, - library_dirs=None, lang="c"): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Return true on success, false - otherwise. - """ - from distutils.ccompiler import CompileError, LinkError - self._check_compiler() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - self.spawn([exe]) - ok = True - except (CompileError, LinkError, DistutilsExecError): - ok = False - - log.info(ok and "success!" or "failure.") - self._clean() - return ok - - - # -- High-level methods -------------------------------------------- - # (these are the ones that are actually likely to be useful - # when implementing a real-world config command!) - - def check_func(self, func, headers=None, include_dirs=None, - libraries=None, library_dirs=None, decl=0, call=0): - """Determine if function 'func' is available by constructing a - source file that refers to 'func', and compiles and links it. - If everything succeeds, returns true; otherwise returns false. - - The constructed source file starts out by including the header - files listed in 'headers'. If 'decl' is true, it then declares - 'func' (as "int func()"); you probably shouldn't supply 'headers' - and set 'decl' true in the same call, or you might get errors about - conflicting declarations for 'func'. Finally, the constructed - 'main()' function either references 'func' or (if 'call' is true) - calls it. 'libraries' and 'library_dirs' are used when - linking.
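# For reference, the probe source that check_func() generates for
# func='foo' with decl=1 and call=1, reconstructed from the body that
# follows:
#
#   int foo ();
#   int main () {
#     foo();
#   }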
- """ - self._check_compiler() - body = [] - if decl: - body.append("int %s ();" % func) - body.append("int main () {") - if call: - body.append(" %s();" % func) - else: - body.append(" %s;" % func) - body.append("}") - body = "\n".join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_lib(self, library, library_dirs=None, headers=None, - include_dirs=None, other_libraries=[]): - """Determine if 'library' is available to be linked against, - without actually checking that any particular symbols are provided - by it. 'headers' will be used in constructing the source file to - be compiled, but the only effect of this is to check if all the - header files listed are available. Any libraries listed in - 'other_libraries' will be included in the link, in case 'library' - has symbols that depend on other libraries. - """ - self._check_compiler() - return self.try_link("int main (void) { }", headers, include_dirs, - [library] + other_libraries, library_dirs) - - def check_header(self, header, include_dirs=None, library_dirs=None, - lang="c"): - """Determine if the system header file named by 'header_file' - exists and can be found by the preprocessor; return true if so, - false otherwise. - """ - return self.try_cpp(body="/* No body */", headers=[header], - include_dirs=include_dirs) - -def dump_file(filename, head=None): - """Dumps a file content into log.info. - - If head is not None, will be dumped before the file content. - """ - if head is None: - log.info('%s', filename) - else: - log.info(head) - file = open(filename) - try: - log.info(file.read()) - finally: - file.close() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install.py deleted file mode 100755 index 41c17d8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install.py +++ /dev/null @@ -1,781 +0,0 @@ -"""distutils.command.install - -Implements the Distutils 'install' command.""" - -import sys -import os -import contextlib -import sysconfig -import itertools - -from distutils import log -from distutils.core import Command -from distutils.debug import DEBUG -from distutils.sysconfig import get_config_vars -from distutils.errors import DistutilsPlatformError -from distutils.file_util import write_file -from distutils.util import convert_path, subst_vars, change_root -from distutils.util import get_platform -from distutils.errors import DistutilsOptionError -from .. 
import _collections - -from site import USER_BASE -from site import USER_SITE -HAS_USER_SITE = True - -WINDOWS_SCHEME = { - 'purelib': '{base}/Lib/site-packages', - 'platlib': '{base}/Lib/site-packages', - 'headers': '{base}/Include/{dist_name}', - 'scripts': '{base}/Scripts', - 'data' : '{base}', -} - -INSTALL_SCHEMES = { - 'posix_prefix': { - 'purelib': '{base}/lib/{implementation_lower}{py_version_short}/site-packages', - 'platlib': '{platbase}/{platlibdir}/{implementation_lower}{py_version_short}/site-packages', - 'headers': '{base}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}', - 'scripts': '{base}/bin', - 'data' : '{base}', - }, - 'posix_home': { - 'purelib': '{base}/lib/{implementation_lower}', - 'platlib': '{base}/{platlibdir}/{implementation_lower}', - 'headers': '{base}/include/{implementation_lower}/{dist_name}', - 'scripts': '{base}/bin', - 'data' : '{base}', - }, - 'nt': WINDOWS_SCHEME, - 'pypy': { - 'purelib': '{base}/site-packages', - 'platlib': '{base}/site-packages', - 'headers': '{base}/include/{dist_name}', - 'scripts': '{base}/bin', - 'data' : '{base}', - }, - 'pypy_nt': { - 'purelib': '{base}/site-packages', - 'platlib': '{base}/site-packages', - 'headers': '{base}/include/{dist_name}', - 'scripts': '{base}/Scripts', - 'data' : '{base}', - }, - } - -# user site schemes -if HAS_USER_SITE: - INSTALL_SCHEMES['nt_user'] = { - 'purelib': '{usersite}', - 'platlib': '{usersite}', - 'headers': '{userbase}/{implementation}{py_version_nodot_plat}/Include/{dist_name}', - 'scripts': '{userbase}/{implementation}{py_version_nodot_plat}/Scripts', - 'data' : '{userbase}', - } - - INSTALL_SCHEMES['posix_user'] = { - 'purelib': '{usersite}', - 'platlib': '{usersite}', - 'headers': - '{userbase}/include/{implementation_lower}{py_version_short}{abiflags}/{dist_name}', - 'scripts': '{userbase}/bin', - 'data' : '{userbase}', - } - -# The keys to an installation scheme; if any new types of files are to be -# installed, be sure to add an entry to every installation scheme above, -# and to SCHEME_KEYS here. -SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data') - - -def _load_sysconfig_schemes(): - with contextlib.suppress(AttributeError): - return { - scheme: sysconfig.get_paths(scheme, expand=False) - for scheme in sysconfig.get_scheme_names() - } - - -def _load_schemes(): - """ - Extend default schemes with schemes from sysconfig. - """ - - sysconfig_schemes = _load_sysconfig_schemes() or {} - - return { - scheme: { - **INSTALL_SCHEMES.get(scheme, {}), - **sysconfig_schemes.get(scheme, {}), - } - for scheme in set(itertools.chain(INSTALL_SCHEMES, sysconfig_schemes)) - } - - -def _get_implementation(): - if hasattr(sys, 'pypy_version_info'): - return 'PyPy' - else: - return 'Python' - - -def _select_scheme(ob, name): - scheme = _inject_headers(name, _load_scheme(_resolve_scheme(name))) - vars(ob).update(_remove_set(ob, _scheme_attrs(scheme))) - - -def _remove_set(ob, attrs): - """ - Include only attrs that are None in ob. 
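# Illustration: the merge performed by _load_schemes() above --
# sysconfig-provided schemes overlay the built-in table and win on key
# collisions (merge_schemes is a hypothetical helper).
import itertools

def merge_schemes(builtin, sysconfig_schemes):
    return {
        name: {**builtin.get(name, {}), **sysconfig_schemes.get(name, {})}
        for name in set(itertools.chain(builtin, sysconfig_schemes))
    }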
- """ - return { - key: value - for key, value in attrs.items() - if getattr(ob, key) is None - } - - -def _resolve_scheme(name): - os_name, sep, key = name.partition('_') - try: - resolved = sysconfig.get_preferred_scheme(key) - except Exception: - resolved = _pypy_hack(name) - return resolved - - -def _load_scheme(name): - return _load_schemes()[name] - - -def _inject_headers(name, scheme): - """ - Given a scheme name and the resolved scheme, - if the scheme does not include headers, resolve - the fallback scheme for the name and use headers - from it. pypa/distutils#88 - """ - # Bypass the preferred scheme, which may not - # have defined headers. - fallback = _load_scheme(_pypy_hack(name)) - scheme.setdefault('headers', fallback['headers']) - return scheme - - -def _scheme_attrs(scheme): - """Resolve install directories by applying the install schemes.""" - return { - f'install_{key}': scheme[key] - for key in SCHEME_KEYS - } - - -def _pypy_hack(name): - PY37 = sys.version_info < (3, 8) - old_pypy = hasattr(sys, 'pypy_version_info') and PY37 - prefix = not name.endswith(('_user', '_home')) - pypy_name = 'pypy' + '_nt' * (os.name == 'nt') - return pypy_name if old_pypy and prefix else name - - -class install(Command): - - description = "install everything from build directory" - - user_options = [ - # Select installation scheme and set base director(y|ies) - ('prefix=', None, - "installation prefix"), - ('exec-prefix=', None, - "(Unix only) prefix for platform-specific files"), - ('home=', None, - "(Unix only) home directory to install under"), - - # Or, just set the base director(y|ies) - ('install-base=', None, - "base installation directory (instead of --prefix or --home)"), - ('install-platbase=', None, - "base installation directory for platform-specific files " + - "(instead of --exec-prefix or --home)"), - ('root=', None, - "install everything relative to this alternate root directory"), - - # Or, explicitly set the installation scheme - ('install-purelib=', None, - "installation directory for pure Python module distributions"), - ('install-platlib=', None, - "installation directory for non-pure module distributions"), - ('install-lib=', None, - "installation directory for all module distributions " + - "(overrides --install-purelib and --install-platlib)"), - - ('install-headers=', None, - "installation directory for C/C++ headers"), - ('install-scripts=', None, - "installation directory for Python scripts"), - ('install-data=', None, - "installation directory for data files"), - - # Byte-compilation options -- see install_lib.py for details, as - # these are duplicated from there (but only install_lib does - # anything with them). - ('compile', 'c', "compile .py to .pyc [default]"), - ('no-compile', None, "don't compile .py files"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - - # Miscellaneous control options - ('force', 'f', - "force installation (overwrite any existing files)"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - - # Where to install documentation (eventually!) 
- #('doc-format=', None, "format of documentation to generate"), - #('install-man=', None, "directory for Unix man pages"), - #('install-html=', None, "directory for HTML documentation"), - #('install-info=', None, "directory for GNU info files"), - - ('record=', None, - "filename in which to record list of installed files"), - ] - - boolean_options = ['compile', 'force', 'skip-build'] - - if HAS_USER_SITE: - user_options.append(('user', None, - "install in user site-package '%s'" % USER_SITE)) - boolean_options.append('user') - - negative_opt = {'no-compile' : 'compile'} - - - def initialize_options(self): - """Initializes options.""" - # High-level options: these select both an installation base - # and scheme. - self.prefix = None - self.exec_prefix = None - self.home = None - self.user = 0 - - # These select only the installation base; it's up to the user to - # specify the installation scheme (currently, that means supplying - # the --install-{platlib,purelib,scripts,data} options). - self.install_base = None - self.install_platbase = None - self.root = None - - # These options are the actual installation directories; if not - # supplied by the user, they are filled in using the installation - # scheme implied by prefix/exec-prefix/home and the contents of - # that installation scheme. - self.install_purelib = None # for pure module distributions - self.install_platlib = None # non-pure (dists w/ extensions) - self.install_headers = None # for C/C++ headers - self.install_lib = None # set to either purelib or platlib - self.install_scripts = None - self.install_data = None - self.install_userbase = USER_BASE - self.install_usersite = USER_SITE - - self.compile = None - self.optimize = None - - # Deprecated - # These two are for putting non-packagized distributions into their - # own directory and creating a .pth file if it makes sense. - # 'extra_path' comes from the setup file; 'install_path_file' can - # be turned off if it makes no sense to install a .pth file. (But - # better to install it uselessly than to guess wrong and not - # install it when it's necessary and would be used!) Currently, - # 'install_path_file' is always true unless some outsider meddles - # with it. - self.extra_path = None - self.install_path_file = 1 - - # 'force' forces installation, even if target files are not - # out-of-date. 'skip_build' skips running the "build" command, - # handy if you know it's not necessary. 'warn_dir' (which is *not* - # a user option, it's just there so the bdist_* commands can turn - # it off) determines whether we warn about installing to a - # directory not in sys.path. - self.force = 0 - self.skip_build = 0 - self.warn_dir = 1 - - # These are only here as a conduit from the 'build' command to the - # 'install_*' commands that do the real work. ('build_base' isn't - # actually used anywhere, but it might be useful in future.) They - # are not user options, because if the user told the install - # command where the build directory is, that wouldn't affect the - # build command. - self.build_base = None - self.build_lib = None - - # Not defined yet because we don't know anything about - # documentation yet. 
- #self.install_man = None - #self.install_html = None - #self.install_info = None - - self.record = None - - - # -- Option finalizing methods ------------------------------------- - # (This is rather more involved than for most commands, - # because this is where the policy for installing third- - # party Python modules on various platforms given a wide - # array of user input is decided. Yes, it's quite complex!) - - def finalize_options(self): - """Finalizes options.""" - # This method (and its helpers, like 'finalize_unix()', - # 'finalize_other()', and 'select_scheme()') is where the default - # installation directories for modules, extension modules, and - # anything else we care to install from a Python module - # distribution. Thus, this code makes a pretty important policy - # statement about how third-party stuff is added to a Python - # installation! Note that the actual work of installation is done - # by the relatively simple 'install_*' commands; they just take - # their orders from the installation directory options determined - # here. - - # Check for errors/inconsistencies in the options; first, stuff - # that's wrong on any platform. - - if ((self.prefix or self.exec_prefix or self.home) and - (self.install_base or self.install_platbase)): - raise DistutilsOptionError( - "must supply either prefix/exec-prefix/home or " + - "install-base/install-platbase -- not both") - - if self.home and (self.prefix or self.exec_prefix): - raise DistutilsOptionError( - "must supply either home or prefix/exec-prefix -- not both") - - if self.user and (self.prefix or self.exec_prefix or self.home or - self.install_base or self.install_platbase): - raise DistutilsOptionError("can't combine user with prefix, " - "exec_prefix/home, or install_(plat)base") - - # Next, stuff that's wrong (or dubious) only on certain platforms. - if os.name != "posix": - if self.exec_prefix: - self.warn("exec-prefix option ignored on this platform") - self.exec_prefix = None - - # Now the interesting logic -- so interesting that we farm it out - # to other methods. The goal of these methods is to set the final - # values for the install_{lib,scripts,data,...} options, using as - # input a heady brew of prefix, exec_prefix, home, install_base, - # install_platbase, user-supplied versions of - # install_{purelib,platlib,lib,scripts,data,...}, and the - # install schemes. Phew! - - self.dump_dirs("pre-finalize_{unix,other}") - - if os.name == 'posix': - self.finalize_unix() - else: - self.finalize_other() - - self.dump_dirs("post-finalize_{unix,other}()") - - # Expand configuration variables, tilde, etc. in self.install_base - # and self.install_platbase -- that way, we can use $base or - # $platbase in the other installation directories and not worry - # about needing recursive variable expansion (shudder). - - py_version = sys.version.split()[0] - (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix') - try: - abiflags = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. 
- abiflags = '' - local_vars = { - 'dist_name': self.distribution.get_name(), - 'dist_version': self.distribution.get_version(), - 'dist_fullname': self.distribution.get_fullname(), - 'py_version': py_version, - 'py_version_short': '%d.%d' % sys.version_info[:2], - 'py_version_nodot': '%d%d' % sys.version_info[:2], - 'sys_prefix': prefix, - 'prefix': prefix, - 'sys_exec_prefix': exec_prefix, - 'exec_prefix': exec_prefix, - 'abiflags': abiflags, - 'platlibdir': getattr(sys, 'platlibdir', 'lib'), - 'implementation_lower': _get_implementation().lower(), - 'implementation': _get_implementation(), - } - - # vars for compatibility on older Pythons - compat_vars = dict( - # Python 3.9 and earlier - py_version_nodot_plat=getattr(sys, 'winver', '').replace('.', ''), - ) - - if HAS_USER_SITE: - local_vars['userbase'] = self.install_userbase - local_vars['usersite'] = self.install_usersite - - self.config_vars = _collections.DictStack( - [compat_vars, sysconfig.get_config_vars(), local_vars]) - - self.expand_basedirs() - - self.dump_dirs("post-expand_basedirs()") - - # Now define config vars for the base directories so we can expand - # everything else. - local_vars['base'] = self.install_base - local_vars['platbase'] = self.install_platbase - - if DEBUG: - from pprint import pprint - print("config vars:") - pprint(dict(self.config_vars)) - - # Expand "~" and configuration variables in the installation - # directories. - self.expand_dirs() - - self.dump_dirs("post-expand_dirs()") - - # Create directories in the home dir: - if self.user: - self.create_home_path() - - # Pick the actual directory to install all modules to: either - # install_purelib or install_platlib, depending on whether this - # module distribution is pure or not. Of course, if the user - # already specified install_lib, use their selection. - if self.install_lib is None: - if self.distribution.has_ext_modules(): # has extensions: non-pure - self.install_lib = self.install_platlib - else: - self.install_lib = self.install_purelib - - - # Convert directories from Unix /-separated syntax to the local - # convention. - self.convert_paths('lib', 'purelib', 'platlib', - 'scripts', 'data', 'headers', - 'userbase', 'usersite') - - # Deprecated - # Well, we're not actually fully completely finalized yet: we still - # have to deal with 'extra_path', which is the hack for allowing - # non-packagized module distributions (hello, Numerical Python!) to - # get their own directories. - self.handle_extra_path() - self.install_libbase = self.install_lib # needed for .pth file - self.install_lib = os.path.join(self.install_lib, self.extra_dirs) - - # If a new root directory was supplied, make all the installation - # dirs relative to it. - if self.root is not None: - self.change_roots('libbase', 'lib', 'purelib', 'platlib', - 'scripts', 'data', 'headers') - - self.dump_dirs("after prepending root") - - # Find out the build directories, ie. where to install from. - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib')) - - # Punt on doc directories for now -- after all, we're punting on - # documentation completely! 
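# Illustration: how the '{base}/...' templates above are expanded against
# config_vars; str.format_map approximates what this vendored distutils'
# subst_vars() ultimately does (an assumption of the sketch).
tmpl = '{base}/lib/{implementation_lower}{py_version_short}/site-packages'
cfg = {'base': '/usr/local', 'implementation_lower': 'python',
       'py_version_short': '3.9'}
assert tmpl.format_map(cfg) == '/usr/local/lib/python3.9/site-packages'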
- - def dump_dirs(self, msg): - """Dumps the list of user options.""" - if not DEBUG: - return - from distutils.fancy_getopt import longopt_xlate - log.debug(msg + ":") - for opt in self.user_options: - opt_name = opt[0] - if opt_name[-1] == "=": - opt_name = opt_name[0:-1] - if opt_name in self.negative_opt: - opt_name = self.negative_opt[opt_name] - opt_name = opt_name.translate(longopt_xlate) - val = not getattr(self, opt_name) - else: - opt_name = opt_name.translate(longopt_xlate) - val = getattr(self, opt_name) - log.debug(" %s: %s", opt_name, val) - - def finalize_unix(self): - """Finalizes options for posix platforms.""" - if self.install_base is not None or self.install_platbase is not None: - incomplete_scheme = ( - ( - self.install_lib is None and - self.install_purelib is None and - self.install_platlib is None - ) or - self.install_headers is None or - self.install_scripts is None or - self.install_data is None - ) - if incomplete_scheme: - raise DistutilsOptionError( - "install-base or install-platbase supplied, but " - "installation scheme is incomplete") - return - - if self.user: - if self.install_userbase is None: - raise DistutilsPlatformError( - "User base directory is not specified") - self.install_base = self.install_platbase = self.install_userbase - self.select_scheme("posix_user") - elif self.home is not None: - self.install_base = self.install_platbase = self.home - self.select_scheme("posix_home") - else: - if self.prefix is None: - if self.exec_prefix is not None: - raise DistutilsOptionError( - "must not supply exec-prefix without prefix") - - # Allow Fedora to add components to the prefix - _prefix_addition = getattr(sysconfig, '_prefix_addition', "") - - self.prefix = ( - os.path.normpath(sys.prefix) + _prefix_addition) - self.exec_prefix = ( - os.path.normpath(sys.exec_prefix) + _prefix_addition) - - else: - if self.exec_prefix is None: - self.exec_prefix = self.prefix - - self.install_base = self.prefix - self.install_platbase = self.exec_prefix - self.select_scheme("posix_prefix") - - def finalize_other(self): - """Finalizes options for non-posix platforms""" - if self.user: - if self.install_userbase is None: - raise DistutilsPlatformError( - "User base directory is not specified") - self.install_base = self.install_platbase = self.install_userbase - self.select_scheme(os.name + "_user") - elif self.home is not None: - self.install_base = self.install_platbase = self.home - self.select_scheme("posix_home") - else: - if self.prefix is None: - self.prefix = os.path.normpath(sys.prefix) - - self.install_base = self.install_platbase = self.prefix - try: - self.select_scheme(os.name) - except KeyError: - raise DistutilsPlatformError( - "I don't know how to install stuff on '%s'" % os.name) - - def select_scheme(self, name): - _select_scheme(self, name) - - def _expand_attrs(self, attrs): - for attr in attrs: - val = getattr(self, attr) - if val is not None: - if os.name == 'posix' or os.name == 'nt': - val = os.path.expanduser(val) - val = subst_vars(val, self.config_vars) - setattr(self, attr, val) - - def expand_basedirs(self): - """Calls `os.path.expanduser` on install_base, install_platbase and - root.""" - self._expand_attrs(['install_base', 'install_platbase', 'root']) - - def expand_dirs(self): - """Calls `os.path.expanduser` on install dirs.""" - self._expand_attrs(['install_purelib', 'install_platlib', - 'install_lib', 'install_headers', - 'install_scripts', 'install_data',]) - - def convert_paths(self, *names): - """Call `convert_path` over 
`names`.""" - for name in names: - attr = "install_" + name - setattr(self, attr, convert_path(getattr(self, attr))) - - def handle_extra_path(self): - """Set `path_file` and `extra_dirs` using `extra_path`.""" - if self.extra_path is None: - self.extra_path = self.distribution.extra_path - - if self.extra_path is not None: - log.warn( - "Distribution option extra_path is deprecated. " - "See issue27919 for details." - ) - if isinstance(self.extra_path, str): - self.extra_path = self.extra_path.split(',') - - if len(self.extra_path) == 1: - path_file = extra_dirs = self.extra_path[0] - elif len(self.extra_path) == 2: - path_file, extra_dirs = self.extra_path - else: - raise DistutilsOptionError( - "'extra_path' option must be a list, tuple, or " - "comma-separated string with 1 or 2 elements") - - # convert to local form in case Unix notation used (as it - # should be in setup scripts) - extra_dirs = convert_path(extra_dirs) - else: - path_file = None - extra_dirs = '' - - # XXX should we warn if path_file and not extra_dirs? (in which - # case the path file would be harmless but pointless) - self.path_file = path_file - self.extra_dirs = extra_dirs - - def change_roots(self, *names): - """Change the install directories pointed by name using root.""" - for name in names: - attr = "install_" + name - setattr(self, attr, change_root(self.root, getattr(self, attr))) - - def create_home_path(self): - """Create directories under ~.""" - if not self.user: - return - home = convert_path(os.path.expanduser("~")) - for name, path in self.config_vars.items(): - if str(path).startswith(home) and not os.path.isdir(path): - self.debug_print("os.makedirs('%s', 0o700)" % path) - os.makedirs(path, 0o700) - - # -- Command execution methods ------------------------------------- - - def run(self): - """Runs the command.""" - # Obviously have to build before we can install - if not self.skip_build: - self.run_command('build') - # If we built for any other platform, we can't install. - build_plat = self.distribution.get_command_obj('build').plat_name - # check warn_dir - it is a clue that the 'install' is happening - # internally, and not to sys.path, so we don't check the platform - # matches what we are running. - if self.warn_dir and build_plat != get_platform(): - raise DistutilsPlatformError("Can't install when " - "cross-compiling") - - # Run all sub-commands (at least those that need to be run) - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - if self.path_file: - self.create_path_file() - - # write list of installed files, if requested. 
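# Illustration: what change_roots() above does per directory (posix case
# only, simplified from distutils.util.change_root).
import os

def change_root_posix(new_root, pathname):
    return os.path.join(new_root, pathname.lstrip(os.sep))

assert change_root_posix('/tmp/stage', '/usr/lib') == '/tmp/stage/usr/lib'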
- if self.record: - outputs = self.get_outputs() - if self.root: # strip any package prefix - root_len = len(self.root) - for counter in range(len(outputs)): - outputs[counter] = outputs[counter][root_len:] - self.execute(write_file, - (self.record, outputs), - "writing list of installed files to '%s'" % - self.record) - - sys_path = map(os.path.normpath, sys.path) - sys_path = map(os.path.normcase, sys_path) - install_lib = os.path.normcase(os.path.normpath(self.install_lib)) - if (self.warn_dir and - not (self.path_file and self.install_path_file) and - install_lib not in sys_path): - log.debug(("modules installed to '%s', which is not in " - "Python's module search path (sys.path) -- " - "you'll have to change the search path yourself"), - self.install_lib) - - def create_path_file(self): - """Creates the .pth file""" - filename = os.path.join(self.install_libbase, - self.path_file + ".pth") - if self.install_path_file: - self.execute(write_file, - (filename, [self.extra_dirs]), - "creating %s" % filename) - else: - self.warn("path file '%s' not created" % filename) - - - # -- Reporting methods --------------------------------------------- - - def get_outputs(self): - """Assembles the outputs of all the sub-commands.""" - outputs = [] - for cmd_name in self.get_sub_commands(): - cmd = self.get_finalized_command(cmd_name) - # Add the contents of cmd.get_outputs(), ensuring - # that outputs doesn't contain duplicate entries - for filename in cmd.get_outputs(): - if filename not in outputs: - outputs.append(filename) - - if self.path_file and self.install_path_file: - outputs.append(os.path.join(self.install_libbase, - self.path_file + ".pth")) - - return outputs - - def get_inputs(self): - """Returns the inputs of all the sub-commands""" - # XXX gee, this looks familiar ;-( - inputs = [] - for cmd_name in self.get_sub_commands(): - cmd = self.get_finalized_command(cmd_name) - inputs.extend(cmd.get_inputs()) - - return inputs - - # -- Predicates for sub-command list ------------------------------- - - def has_lib(self): - """Returns true if the current distribution has any Python - modules to install.""" - return (self.distribution.has_pure_modules() or - self.distribution.has_ext_modules()) - - def has_headers(self): - """Returns true if the current distribution has any headers to - install.""" - return self.distribution.has_headers() - - def has_scripts(self): - """Returns true if the current distribution has any scripts to - install.""" - return self.distribution.has_scripts() - - def has_data(self): - """Returns true if the current distribution has any data to - install.""" - return self.distribution.has_data_files() - - # 'sub_commands': a list of commands this command might have to run to - # get its work done. See cmd.py for more info.
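# Worked example of the --record/--root interaction in run() above: the
# staging root is stripped so the record lists final install locations.
root = '/tmp/stage'
outputs = ['/tmp/stage/usr/lib/python3.9/site-packages/pkg/__init__.py']
assert [o[len(root):] for o in outputs] == \
    ['/usr/lib/python3.9/site-packages/pkg/__init__.py']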
- sub_commands = [('install_lib', has_lib), - ('install_headers', has_headers), - ('install_scripts', has_scripts), - ('install_data', has_data), - ('install_egg_info', lambda self:True), - ] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_data.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_data.py deleted file mode 100755 index 947cd76..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_data.py +++ /dev/null @@ -1,79 +0,0 @@ -"""distutils.command.install_data - -Implements the Distutils 'install_data' command, for installing -platform-independent data files.""" - -# contributed by Bastian Kleineidam - -import os -from distutils.core import Command -from distutils.util import change_root, convert_path - -class install_data(Command): - - description = "install data files" - - user_options = [ - ('install-dir=', 'd', - "base directory for installing data files " - "(default: installation base dir)"), - ('root=', None, - "install everything relative to this alternate root directory"), - ('force', 'f', "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - self.root = None - self.force = 0 - self.data_files = self.distribution.data_files - self.warn_dir = 1 - - def finalize_options(self): - self.set_undefined_options('install', - ('install_data', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) - - def run(self): - self.mkpath(self.install_dir) - for f in self.data_files: - if isinstance(f, str): - # it's a simple file, so copy it - f = convert_path(f) - if self.warn_dir: - self.warn("setup script did not provide a directory for " - "'%s' -- installing right in '%s'" % - (f, self.install_dir)) - (out, _) = self.copy_file(f, self.install_dir) - self.outfiles.append(out) - else: - # it's a tuple with path to install to and a list of files - dir = convert_path(f[0]) - if not os.path.isabs(dir): - dir = os.path.join(self.install_dir, dir) - elif self.root: - dir = change_root(self.root, dir) - self.mkpath(dir) - - if f[1] == []: - # If there are no files listed, the user must be - # trying to create an empty directory, so add the - # directory to the list of output files. - self.outfiles.append(dir) - else: - # Copy files, adding them to the list of output files. 
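-                # Editor's illustration (not upstream code): an entry such as
-                # ('share/doc', ['README', 'CHANGES']) copies both files into
-                # <install_dir>/share/doc.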
- for data in f[1]: - data = convert_path(data) - (out, _) = self.copy_file(data, dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.data_files or [] - - def get_outputs(self): - return self.outfiles diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_egg_info.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_egg_info.py deleted file mode 100755 index adc0323..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_egg_info.py +++ /dev/null @@ -1,84 +0,0 @@ -"""distutils.command.install_egg_info - -Implements the Distutils 'install_egg_info' command, for installing -a package's PKG-INFO metadata.""" - - -from distutils.cmd import Command -from distutils import log, dir_util -import os, sys, re - -class install_egg_info(Command): - """Install an .egg-info file for the package""" - - description = "Install package's PKG-INFO metadata as an .egg-info file" - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ] - - def initialize_options(self): - self.install_dir = None - - @property - def basename(self): - """ - Allow basename to be overridden by child class. - Ref pypa/distutils#2. - """ - return "%s-%s-py%d.%d.egg-info" % ( - to_filename(safe_name(self.distribution.get_name())), - to_filename(safe_version(self.distribution.get_version())), - *sys.version_info[:2] - ) - - def finalize_options(self): - self.set_undefined_options('install_lib',('install_dir','install_dir')) - self.target = os.path.join(self.install_dir, self.basename) - self.outputs = [self.target] - - def run(self): - target = self.target - if os.path.isdir(target) and not os.path.islink(target): - dir_util.remove_tree(target, dry_run=self.dry_run) - elif os.path.exists(target): - self.execute(os.unlink,(self.target,),"Removing "+target) - elif not os.path.isdir(self.install_dir): - self.execute(os.makedirs, (self.install_dir,), - "Creating "+self.install_dir) - log.info("Writing %s", target) - if not self.dry_run: - with open(target, 'w', encoding='UTF-8') as f: - self.distribution.metadata.write_pkg_file(f) - - def get_outputs(self): - return self.outputs - - -# The following routines are taken from setuptools' pkg_resources module and -# can be replaced by importing them from pkg_resources once it is included -# in the stdlib. - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """Convert an arbitrary string to a standard version string - - Spaces become dots, and all other non-alphanumeric characters become - dashes, with runs of multiple dashes condensed to a single dash. - """ - version = version.replace(' ','.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. 
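-
-    For example (editor's illustration): to_filename('my-package')
-    returns 'my_package'.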
- """ - return name.replace('-','_') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_headers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_headers.py deleted file mode 100755 index 9bb0b18..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_headers.py +++ /dev/null @@ -1,47 +0,0 @@ -"""distutils.command.install_headers - -Implements the Distutils 'install_headers' command, to install C/C++ header -files to the Python include directory.""" - -from distutils.core import Command - - -# XXX force is never used -class install_headers(Command): - - description = "install C/C++ header files" - - user_options = [('install-dir=', 'd', - "directory to install header files to"), - ('force', 'f', - "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.force = 0 - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', - ('install_headers', 'install_dir'), - ('force', 'force')) - - - def run(self): - headers = self.distribution.headers - if not headers: - return - - self.mkpath(self.install_dir) - for header in headers: - (out, _) = self.copy_file(header, self.install_dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.distribution.headers or [] - - def get_outputs(self): - return self.outfiles diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_lib.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_lib.py deleted file mode 100755 index 6154cf0..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_lib.py +++ /dev/null @@ -1,217 +0,0 @@ -"""distutils.command.install_lib - -Implements the Distutils 'install_lib' command -(install all Python modules).""" - -import os -import importlib.util -import sys - -from distutils.core import Command -from distutils.errors import DistutilsOptionError - - -# Extension for Python source files. -PYTHON_SOURCE_EXTENSION = ".py" - -class install_lib(Command): - - description = "install all Python modules (extensions and pure Python)" - - # The byte-compilation options are a tad confusing. Here are the - # possible scenarios: - # 1) no compilation at all (--no-compile --no-optimize) - # 2) compile .pyc only (--compile --no-optimize; default) - # 3) compile .pyc and "opt-1" .pyc (--compile --optimize) - # 4) compile "opt-1" .pyc only (--no-compile --optimize) - # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more) - # 6) compile "opt-2" .pyc only (--no-compile --optimize-more) - # - # The UI for this is two options, 'compile' and 'optimize'. - # 'compile' is strictly boolean, and only decides whether to - # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and - # decides both whether to generate .pyc files and what level of - # optimization to use. 
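-    #
-    # Editor's illustration: "--compile --optimize=1" is scenario 3 above
-    # (.pyc plus opt-1 .pyc), while "--no-compile --optimize=2" is
-    # scenario 6 (opt-2 .pyc only).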
- - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ('build-dir=','b', "build directory (where to install from)"), - ('force', 'f', "force installation (overwrite existing files)"), - ('compile', 'c', "compile .py to .pyc [default]"), - ('no-compile', None, "don't compile .py files"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('skip-build', None, "skip the build steps"), - ] - - boolean_options = ['force', 'compile', 'skip-build'] - negative_opt = {'no-compile' : 'compile'} - - def initialize_options(self): - # let the 'install' command dictate our installation directory - self.install_dir = None - self.build_dir = None - self.force = 0 - self.compile = None - self.optimize = None - self.skip_build = None - - def finalize_options(self): - # Get all the information we need to install pure Python modules - # from the umbrella 'install' command -- build (source) directory, - # install (target) directory, and whether to compile .py files. - self.set_undefined_options('install', - ('build_lib', 'build_dir'), - ('install_lib', 'install_dir'), - ('force', 'force'), - ('compile', 'compile'), - ('optimize', 'optimize'), - ('skip_build', 'skip_build'), - ) - - if self.compile is None: - self.compile = True - if self.optimize is None: - self.optimize = False - - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - if self.optimize not in (0, 1, 2): - raise AssertionError - except (ValueError, AssertionError): - raise DistutilsOptionError("optimize must be 0, 1, or 2") - - def run(self): - # Make sure we have built everything we need first - self.build() - - # Install everything: simply dump the entire contents of the build - # directory to the installation directory (that's the beauty of - # having a build directory!) - outfiles = self.install() - - # (Optionally) compile .py to .pyc - if outfiles is not None and self.distribution.has_pure_modules(): - self.byte_compile(outfiles) - - # -- Top-level worker functions ------------------------------------ - # (called from 'run()') - - def build(self): - if not self.skip_build: - if self.distribution.has_pure_modules(): - self.run_command('build_py') - if self.distribution.has_ext_modules(): - self.run_command('build_ext') - - def install(self): - if os.path.isdir(self.build_dir): - outfiles = self.copy_tree(self.build_dir, self.install_dir) - else: - self.warn("'%s' does not exist -- no Python modules to install" % - self.build_dir) - return - return outfiles - - def byte_compile(self, files): - if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - - from distutils.util import byte_compile - - # Get the "--root" directory supplied to the "install" command, - # and use it as a prefix to strip off the purported filename - # encoded in bytecode files. This is far from complete, but it - # should at least generate usable bytecode in RPM distributions. 
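-        # Editor's note (illustration): with root='/tmp/buildroot', a module
-        # staged at '/tmp/buildroot/usr/lib/python3/foo.py' has just
-        # '/usr/lib/python3/foo.py' embedded in its bytecode.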
- install_root = self.get_finalized_command('install').root - - if self.compile: - byte_compile(files, optimize=0, - force=self.force, prefix=install_root, - dry_run=self.dry_run) - if self.optimize > 0: - byte_compile(files, optimize=self.optimize, - force=self.force, prefix=install_root, - verbose=self.verbose, dry_run=self.dry_run) - - - # -- Utility methods ----------------------------------------------- - - def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir): - if not has_any: - return [] - - build_cmd = self.get_finalized_command(build_cmd) - build_files = build_cmd.get_outputs() - build_dir = getattr(build_cmd, cmd_option) - - prefix_len = len(build_dir) + len(os.sep) - outputs = [] - for file in build_files: - outputs.append(os.path.join(output_dir, file[prefix_len:])) - - return outputs - - def _bytecode_filenames(self, py_filenames): - bytecode_files = [] - for py_file in py_filenames: - # Since build_py handles package data installation, the - # list of outputs can contain more than just .py files. - # Make sure we only report bytecode for the .py files. - ext = os.path.splitext(os.path.normcase(py_file))[1] - if ext != PYTHON_SOURCE_EXTENSION: - continue - if self.compile: - bytecode_files.append(importlib.util.cache_from_source( - py_file, optimization='')) - if self.optimize > 0: - bytecode_files.append(importlib.util.cache_from_source( - py_file, optimization=self.optimize)) - - return bytecode_files - - - # -- External interface -------------------------------------------- - # (called by outsiders) - - def get_outputs(self): - """Return the list of files that would be installed if this command - were actually run. Not affected by the "dry-run" flag or whether - modules have actually been built yet. - """ - pure_outputs = \ - self._mutate_outputs(self.distribution.has_pure_modules(), - 'build_py', 'build_lib', - self.install_dir) - if self.compile: - bytecode_outputs = self._bytecode_filenames(pure_outputs) - else: - bytecode_outputs = [] - - ext_outputs = \ - self._mutate_outputs(self.distribution.has_ext_modules(), - 'build_ext', 'build_lib', - self.install_dir) - - return pure_outputs + bytecode_outputs + ext_outputs - - def get_inputs(self): - """Get the list of files that are input to this command, ie. the - files that get installed as they are named in the build tree. - The files in this list correspond one-to-one to the output - filenames returned by 'get_outputs()'. 
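-        (Editor's illustration: 'build/lib/pkg/mod.py' pairs with
-        '<install_dir>/pkg/mod.py' in the outputs.)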
- """ - inputs = [] - - if self.distribution.has_pure_modules(): - build_py = self.get_finalized_command('build_py') - inputs.extend(build_py.get_outputs()) - - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - inputs.extend(build_ext.get_outputs()) - - return inputs diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_scripts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_scripts.py deleted file mode 100755 index 31a1130..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/install_scripts.py +++ /dev/null @@ -1,60 +0,0 @@ -"""distutils.command.install_scripts - -Implements the Distutils 'install_scripts' command, for installing -Python scripts.""" - -# contributed by Bastian Kleineidam - -import os -from distutils.core import Command -from distutils import log -from stat import ST_MODE - - -class install_scripts(Command): - - description = "install scripts (Python or otherwise)" - - user_options = [ - ('install-dir=', 'd', "directory to install scripts to"), - ('build-dir=','b', "build directory (where to install from)"), - ('force', 'f', "force installation (overwrite existing files)"), - ('skip-build', None, "skip the build steps"), - ] - - boolean_options = ['force', 'skip-build'] - - def initialize_options(self): - self.install_dir = None - self.force = 0 - self.build_dir = None - self.skip_build = None - - def finalize_options(self): - self.set_undefined_options('build', ('build_scripts', 'build_dir')) - self.set_undefined_options('install', - ('install_scripts', 'install_dir'), - ('force', 'force'), - ('skip_build', 'skip_build'), - ) - - def run(self): - if not self.skip_build: - self.run_command('build_scripts') - self.outfiles = self.copy_tree(self.build_dir, self.install_dir) - if os.name == 'posix': - # Set the executable bits (owner, group, and world) on - # all the scripts we just installed. - for file in self.get_outputs(): - if self.dry_run: - log.info("changing mode of %s", file) - else: - mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777 - log.info("changing mode of %s to %o", file, mode) - os.chmod(file, mode) - - def get_inputs(self): - return self.distribution.scripts or [] - - def get_outputs(self): - return self.outfiles or [] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/py37compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/py37compat.py deleted file mode 100755 index 754715a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/py37compat.py +++ /dev/null @@ -1,30 +0,0 @@ -import sys - - -def _pythonlib_compat(): - """ - On Python 3.7 and earlier, distutils would include the Python - library. See pypa/distutils#9. 
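-
-    Editor's illustration: on a shared CPython 3.7 build this yields a
-    name like 'python3.7m' -- major and minor decoded from sys.hexversion,
-    plus ABIFLAGS.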
- """ - from distutils import sysconfig - if not sysconfig.get_config_var('Py_ENABLED_SHARED'): - return - - yield 'python{}.{}{}'.format( - sys.hexversion >> 24, - (sys.hexversion >> 16) & 0xff, - sysconfig.get_config_var('ABIFLAGS'), - ) - - -def compose(f1, f2): - return lambda *args, **kwargs: f1(f2(*args, **kwargs)) - - -pythonlib = ( - compose(list, _pythonlib_compat) - if sys.version_info < (3, 8) - and sys.platform != 'darwin' - and sys.platform[:3] != 'aix' - else list -) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/register.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/register.py deleted file mode 100755 index 0fac94e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/register.py +++ /dev/null @@ -1,304 +0,0 @@ -"""distutils.command.register - -Implements the Distutils 'register' command (register with the repository). -""" - -# created 2002/10/21, Richard Jones - -import getpass -import io -import urllib.parse, urllib.request -from warnings import warn - -from distutils.core import PyPIRCCommand -from distutils.errors import * -from distutils import log - -class register(PyPIRCCommand): - - description = ("register the distribution with the Python package index") - user_options = PyPIRCCommand.user_options + [ - ('list-classifiers', None, - 'list the valid Trove classifiers'), - ('strict', None , - 'Will stop the registering if the meta-data are not fully compliant') - ] - boolean_options = PyPIRCCommand.boolean_options + [ - 'verify', 'list-classifiers', 'strict'] - - sub_commands = [('check', lambda self: True)] - - def initialize_options(self): - PyPIRCCommand.initialize_options(self) - self.list_classifiers = 0 - self.strict = 0 - - def finalize_options(self): - PyPIRCCommand.finalize_options(self) - # setting options for the `check` subcommand - check_options = {'strict': ('register', self.strict), - 'restructuredtext': ('register', 1)} - self.distribution.command_options['check'] = check_options - - def run(self): - self.finalize_options() - self._set_config() - - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - if self.dry_run: - self.verify_metadata() - elif self.list_classifiers: - self.classifiers() - else: - self.send_metadata() - - def check_metadata(self): - """Deprecated API.""" - warn("distutils.command.register.check_metadata is deprecated, \ - use the check command instead", PendingDeprecationWarning) - check = self.distribution.get_command_obj('check') - check.ensure_finalized() - check.strict = self.strict - check.restructuredtext = 1 - check.run() - - def _set_config(self): - ''' Reads the configuration file and set attributes. - ''' - config = self._read_pypirc() - if config != {}: - self.username = config['username'] - self.password = config['password'] - self.repository = config['repository'] - self.realm = config['realm'] - self.has_config = True - else: - if self.repository not in ('pypi', self.DEFAULT_REPOSITORY): - raise ValueError('%s not found in .pypirc' % self.repository) - if self.repository == 'pypi': - self.repository = self.DEFAULT_REPOSITORY - self.has_config = False - - def classifiers(self): - ''' Fetch the list of classifiers from the server. 
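-
-        (Editor's note: this GETs '<repository>?:action=list_classifiers'
-        and logs the response body, as the code below shows.)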
- ''' - url = self.repository+'?:action=list_classifiers' - response = urllib.request.urlopen(url) - log.info(self._read_pypi_response(response)) - - def verify_metadata(self): - ''' Send the metadata to the package index server to be checked. - ''' - # send the info to the server and report the result - (code, result) = self.post_to_server(self.build_post_data('verify')) - log.info('Server response (%s): %s', code, result) - - def send_metadata(self): - ''' Send the metadata to the package index server. - - Well, do the following: - 1. figure who the user is, and then - 2. send the data as a Basic auth'ed POST. - - First we try to read the username/password from $HOME/.pypirc, - which is a ConfigParser-formatted file with a section - [distutils] containing username and password entries (both - in clear text). Eg: - - [distutils] - index-servers = - pypi - - [pypi] - username: fred - password: sekrit - - Otherwise, to figure who the user is, we offer the user three - choices: - - 1. use existing login, - 2. register as a new user, or - 3. set the password to a random string and email the user. - - ''' - # see if we can short-cut and get the username/password from the - # config - if self.has_config: - choice = '1' - username = self.username - password = self.password - else: - choice = 'x' - username = password = '' - - # get the user's login info - choices = '1 2 3 4'.split() - while choice not in choices: - self.announce('''\ -We need to know who you are, so please choose either: - 1. use your existing login, - 2. register as a new user, - 3. have the server generate a new password for you (and email it to you), or - 4. quit -Your selection [default 1]: ''', log.INFO) - choice = input() - if not choice: - choice = '1' - elif choice not in choices: - print('Please choose one of the four options!') - - if choice == '1': - # get the username and password - while not username: - username = input('Username: ') - while not password: - password = getpass.getpass('Password: ') - - # set up the authentication - auth = urllib.request.HTTPPasswordMgr() - host = urllib.parse.urlparse(self.repository)[1] - auth.add_password(self.realm, host, username, password) - # send the info to the server and report the result - code, result = self.post_to_server(self.build_post_data('submit'), - auth) - self.announce('Server response (%s): %s' % (code, result), - log.INFO) - - # possibly save the login - if code == 200: - if self.has_config: - # sharing the password in the distribution instance - # so the upload command can reuse it - self.distribution.password = password - else: - self.announce(('I can store your PyPI login so future ' - 'submissions will be faster.'), log.INFO) - self.announce('(the login will be stored in %s)' % \ - self._get_rc_file(), log.INFO) - choice = 'X' - while choice.lower() not in 'yn': - choice = input('Save your login (y/N)?') - if not choice: - choice = 'n' - if choice.lower() == 'y': - self._store_pypirc(username, password) - - elif choice == '2': - data = {':action': 'user'} - data['name'] = data['password'] = data['email'] = '' - data['confirm'] = None - while not data['name']: - data['name'] = input('Username: ') - while data['password'] != data['confirm']: - while not data['password']: - data['password'] = getpass.getpass('Password: ') - while not data['confirm']: - data['confirm'] = getpass.getpass(' Confirm: ') - if data['password'] != data['confirm']: - data['password'] = '' - data['confirm'] = None - print("Password and confirm don't match!") - while not data['email']: - 
data['email'] = input(' EMail: ') - code, result = self.post_to_server(data) - if code != 200: - log.info('Server response (%s): %s', code, result) - else: - log.info('You will receive an email shortly.') - log.info(('Follow the instructions in it to ' - 'complete registration.')) - elif choice == '3': - data = {':action': 'password_reset'} - data['email'] = '' - while not data['email']: - data['email'] = input('Your email address: ') - code, result = self.post_to_server(data) - log.info('Server response (%s): %s', code, result) - - def build_post_data(self, action): - # figure the data to send - the metadata plus some additional - # information used by the package server - meta = self.distribution.metadata - data = { - ':action': action, - 'metadata_version' : '1.0', - 'name': meta.get_name(), - 'version': meta.get_version(), - 'summary': meta.get_description(), - 'home_page': meta.get_url(), - 'author': meta.get_contact(), - 'author_email': meta.get_contact_email(), - 'license': meta.get_licence(), - 'description': meta.get_long_description(), - 'keywords': meta.get_keywords(), - 'platform': meta.get_platforms(), - 'classifiers': meta.get_classifiers(), - 'download_url': meta.get_download_url(), - # PEP 314 - 'provides': meta.get_provides(), - 'requires': meta.get_requires(), - 'obsoletes': meta.get_obsoletes(), - } - if data['provides'] or data['requires'] or data['obsoletes']: - data['metadata_version'] = '1.1' - return data - - def post_to_server(self, data, auth=None): - ''' Post a query to the server, and return a string response. - ''' - if 'name' in data: - self.announce('Registering %s to %s' % (data['name'], - self.repository), - log.INFO) - # Build up the MIME payload for the urllib2 POST data - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = '\n--' + boundary - end_boundary = sep_boundary + '--' - body = io.StringIO() - for key, value in data.items(): - # handle multiple entries for the same name - if type(value) not in (type([]), type( () )): - value = [value] - for value in value: - value = str(value) - body.write(sep_boundary) - body.write('\nContent-Disposition: form-data; name="%s"'%key) - body.write("\n\n") - body.write(value) - if value and value[-1] == '\r': - body.write('\n') # write an extra newline (lurve Macs) - body.write(end_boundary) - body.write("\n") - body = body.getvalue().encode("utf-8") - - # build the Request - headers = { - 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary, - 'Content-length': str(len(body)) - } - req = urllib.request.Request(self.repository, body, headers) - - # handle HTTP and include the Basic Auth handler - opener = urllib.request.build_opener( - urllib.request.HTTPBasicAuthHandler(password_mgr=auth) - ) - data = '' - try: - result = opener.open(req) - except urllib.error.HTTPError as e: - if self.show_response: - data = e.fp.read() - result = e.code, e.msg - except urllib.error.URLError as e: - result = 500, str(e) - else: - if self.show_response: - data = self._read_pypi_response(result) - result = 200, 'OK' - if self.show_response: - msg = '\n'.join(('-' * 75, data, '-' * 75)) - self.announce(msg, log.INFO) - return result diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/sdist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/sdist.py deleted file mode 100755 index b4996fc..0000000 --- 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/sdist.py +++ /dev/null @@ -1,494 +0,0 @@ -"""distutils.command.sdist - -Implements the Distutils 'sdist' command (create a source distribution).""" - -import os -import sys -from glob import glob -from warnings import warn - -from distutils.core import Command -from distutils import dir_util -from distutils import file_util -from distutils import archive_util -from distutils.text_file import TextFile -from distutils.filelist import FileList -from distutils import log -from distutils.util import convert_path -from distutils.errors import DistutilsTemplateError, DistutilsOptionError - - -def show_formats(): - """Print all possible values for the 'formats' option (used by - the "--help-formats" command-line option). - """ - from distutils.fancy_getopt import FancyGetopt - from distutils.archive_util import ARCHIVE_FORMATS - formats = [] - for format in ARCHIVE_FORMATS.keys(): - formats.append(("formats=" + format, None, - ARCHIVE_FORMATS[format][2])) - formats.sort() - FancyGetopt(formats).print_help( - "List of available source distribution formats:") - - -class sdist(Command): - - description = "create a source distribution (tarball, zip file, etc.)" - - def checking_metadata(self): - """Callable used for the check sub-command. - - Placed here so user_options can view it""" - return self.metadata_check - - user_options = [ - ('template=', 't', - "name of manifest template file [default: MANIFEST.in]"), - ('manifest=', 'm', - "name of manifest file [default: MANIFEST]"), - ('use-defaults', None, - "include the default file set in the manifest " - "[default; disable with --no-defaults]"), - ('no-defaults', None, - "don't include the default file set"), - ('prune', None, - "specifically exclude files/directories that should not be " - "distributed (build tree, RCS/CVS dirs, etc.) " - "[default; disable with --no-prune]"), - ('no-prune', None, - "don't automatically exclude anything"), - ('manifest-only', 'o', - "just regenerate the manifest and then stop " - "(implies --force-manifest)"), - ('force-manifest', 'f', - "forcibly regenerate the manifest and carry on as usual. " - "Deprecated: now the manifest is always regenerated."), - ('formats=', None, - "formats for source distribution (comma-separated list)"), - ('keep-temp', 'k', - "keep the distribution tree around after creating " + - "archive file(s)"), - ('dist-dir=', 'd', - "directory to put the source distribution archive(s) in " - "[default: dist]"), - ('metadata-check', None, - "Ensure that all required elements of meta-data " - "are supplied. Warn if any missing. [default]"), - ('owner=', 'u', - "Owner name used when creating a tar file [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file [default: current group]"), - ] - - boolean_options = ['use-defaults', 'prune', - 'manifest-only', 'force-manifest', - 'keep-temp', 'metadata-check'] - - help_options = [ - ('help-formats', None, - "list available distribution formats", show_formats), - ] - - negative_opt = {'no-defaults': 'use-defaults', - 'no-prune': 'prune' } - - sub_commands = [('check', checking_metadata)] - - READMES = ('README', 'README.txt', 'README.rst') - - def initialize_options(self): - # 'template' and 'manifest' are, respectively, the names of - # the manifest template and manifest file. 
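-        # Editor's note: both default to None here and fall back to
-        # "MANIFEST.in" / "MANIFEST" in finalize_options() below.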
- self.template = None - self.manifest = None - - # 'use_defaults': if true, we will include the default file set - # in the manifest - self.use_defaults = 1 - self.prune = 1 - - self.manifest_only = 0 - self.force_manifest = 0 - - self.formats = ['gztar'] - self.keep_temp = 0 - self.dist_dir = None - - self.archive_files = None - self.metadata_check = 1 - self.owner = None - self.group = None - - def finalize_options(self): - if self.manifest is None: - self.manifest = "MANIFEST" - if self.template is None: - self.template = "MANIFEST.in" - - self.ensure_string_list('formats') - - bad_format = archive_util.check_archive_formats(self.formats) - if bad_format: - raise DistutilsOptionError( - "unknown archive format '%s'" % bad_format) - - if self.dist_dir is None: - self.dist_dir = "dist" - - def run(self): - # 'filelist' contains the list of files that will make up the - # manifest - self.filelist = FileList() - - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - # Do whatever it takes to get the list of files to process - # (process the manifest template, read an existing manifest, - # whatever). File list is accumulated in 'self.filelist'. - self.get_file_list() - - # If user just wanted us to regenerate the manifest, stop now. - if self.manifest_only: - return - - # Otherwise, go ahead and create the source distribution tarball, - # or zipfile, or whatever. - self.make_distribution() - - def check_metadata(self): - """Deprecated API.""" - warn("distutils.command.sdist.check_metadata is deprecated, \ - use the check command instead", PendingDeprecationWarning) - check = self.distribution.get_command_obj('check') - check.ensure_finalized() - check.run() - - def get_file_list(self): - """Figure out the list of files to include in the source - distribution, and put it in 'self.filelist'. This might involve - reading the manifest template (and writing the manifest), or just - reading the manifest, or just using the default file set -- it all - depends on the user's options. - """ - # new behavior when using a template: - # the file list is recalculated every time because - # even if MANIFEST.in or setup.py are not changed - # the user might have added some files in the tree that - # need to be included. - # - # This makes --force the default and only behavior with templates. - template_exists = os.path.isfile(self.template) - if not template_exists and self._manifest_is_not_generated(): - self.read_manifest() - self.filelist.sort() - self.filelist.remove_duplicates() - return - - if not template_exists: - self.warn(("manifest template '%s' does not exist " + - "(using default file list)") % - self.template) - self.filelist.findall() - - if self.use_defaults: - self.add_defaults() - - if template_exists: - self.read_template() - - if self.prune: - self.prune_file_list() - - self.filelist.sort() - self.filelist.remove_duplicates() - self.write_manifest() - - def add_defaults(self): - """Add all the default files to self.filelist: - - README or README.txt - - setup.py - - test/test*.py - - all pure Python modules mentioned in setup script - - all files pointed by package_data (build_py) - - all files defined in data_files. - - all files defined as scripts. - - all C sources listed as part of extensions or C libraries - in the setup script (doesn't catch C headers!) - Warns if (README or README.txt) or setup.py are missing; everything - else is optional. 
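-
-        (Editor's note: per the READMES tuple above, README.rst is
-        accepted as well.)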
- """ - self._add_defaults_standards() - self._add_defaults_optional() - self._add_defaults_python() - self._add_defaults_data_files() - self._add_defaults_ext() - self._add_defaults_c_libs() - self._add_defaults_scripts() - - @staticmethod - def _cs_path_exists(fspath): - """ - Case-sensitive path existence check - - >>> sdist._cs_path_exists(__file__) - True - >>> sdist._cs_path_exists(__file__.upper()) - False - """ - if not os.path.exists(fspath): - return False - # make absolute so we always have a directory - abspath = os.path.abspath(fspath) - directory, filename = os.path.split(abspath) - return filename in os.listdir(directory) - - def _add_defaults_standards(self): - standards = [self.READMES, self.distribution.script_name] - for fn in standards: - if isinstance(fn, tuple): - alts = fn - got_it = False - for fn in alts: - if self._cs_path_exists(fn): - got_it = True - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if self._cs_path_exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - def _add_defaults_optional(self): - optional = ['test/test*.py', 'setup.cfg'] - for pattern in optional: - files = filter(os.path.isfile, glob(pattern)) - self.filelist.extend(files) - - def _add_defaults_python(self): - # build_py is used to get: - # - python modules - # - files defined in package_data - build_py = self.get_finalized_command('build_py') - - # getting python files - if self.distribution.has_pure_modules(): - self.filelist.extend(build_py.get_source_files()) - - # getting package_data files - # (computed in build_py.data_files by build_py.finalize_options) - for pkg, src_dir, build_dir, filenames in build_py.data_files: - for filename in filenames: - self.filelist.append(os.path.join(src_dir, filename)) - - def _add_defaults_data_files(self): - # getting distribution.data_files - if self.distribution.has_data_files(): - for item in self.distribution.data_files: - if isinstance(item, str): - # plain file - item = convert_path(item) - if os.path.isfile(item): - self.filelist.append(item) - else: - # a (dirname, filenames) tuple - dirname, filenames = item - for f in filenames: - f = convert_path(f) - if os.path.isfile(f): - self.filelist.append(f) - - def _add_defaults_ext(self): - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - def _add_defaults_c_libs(self): - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - def _add_defaults_scripts(self): - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) - - def read_template(self): - """Read and parse manifest template file named by self.template. - - (usually "MANIFEST.in") The parsing and processing is done by - 'self.filelist', which updates itself accordingly. 
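-
-        Editor's illustration: lines such as "include *.txt" or
-        "recursive-include docs *.rst" are handed one at a time to
-        'self.filelist.process_template_line()'.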
- """ - log.info("reading manifest template '%s'", self.template) - template = TextFile(self.template, strip_comments=1, skip_blanks=1, - join_lines=1, lstrip_ws=1, rstrip_ws=1, - collapse_join=1) - - try: - while True: - line = template.readline() - if line is None: # end of file - break - - try: - self.filelist.process_template_line(line) - # the call above can raise a DistutilsTemplateError for - # malformed lines, or a ValueError from the lower-level - # convert_path function - except (DistutilsTemplateError, ValueError) as msg: - self.warn("%s, line %d: %s" % (template.filename, - template.current_line, - msg)) - finally: - template.close() - - def prune_file_list(self): - """Prune off branches that might slip into the file list as created - by 'read_template()', but really don't belong there: - * the build tree (typically "build") - * the release tree itself (only an issue if we ran "sdist" - previously with --keep-temp, or it aborted) - * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories - """ - build = self.get_finalized_command('build') - base_dir = self.distribution.get_fullname() - - self.filelist.exclude_pattern(None, prefix=build.build_base) - self.filelist.exclude_pattern(None, prefix=base_dir) - - if sys.platform == 'win32': - seps = r'/|\\' - else: - seps = '/' - - vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', - '_darcs'] - vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps) - self.filelist.exclude_pattern(vcs_ptrn, is_regex=1) - - def write_manifest(self): - """Write the file list in 'self.filelist' (presumably as filled in - by 'add_defaults()' and 'read_template()') to the manifest file - named by 'self.manifest'. - """ - if self._manifest_is_not_generated(): - log.info("not writing to manually maintained " - "manifest file '%s'" % self.manifest) - return - - content = self.filelist.files[:] - content.insert(0, '# file GENERATED by distutils, do NOT edit') - self.execute(file_util.write_file, (self.manifest, content), - "writing manifest file '%s'" % self.manifest) - - def _manifest_is_not_generated(self): - # check for special comment used in 3.1.3 and higher - if not os.path.isfile(self.manifest): - return False - - fp = open(self.manifest) - try: - first_line = fp.readline() - finally: - fp.close() - return first_line != '# file GENERATED by distutils, do NOT edit\n' - - def read_manifest(self): - """Read the manifest file (named by 'self.manifest') and use it to - fill in 'self.filelist', the list of files to include in the source - distribution. - """ - log.info("reading manifest file '%s'", self.manifest) - with open(self.manifest) as manifest: - for line in manifest: - # ignore comments and blank lines - line = line.strip() - if line.startswith('#') or not line: - continue - self.filelist.append(line) - - def make_release_tree(self, base_dir, files): - """Create the directory tree that will become the source - distribution archive. All directories implied by the filenames in - 'files' are created under 'base_dir', and then we hard link or copy - (if hard linking is unavailable) those files into place. - Essentially, this duplicates the developer's source tree, but in a - directory named after the distribution, containing only the files - to be distributed. - """ - # Create all the directories under 'base_dir' necessary to - # put 'files' there; the 'mkpath()' is just so we don't die - # if the manifest happens to be empty. 
- self.mkpath(base_dir) - dir_util.create_tree(base_dir, files, dry_run=self.dry_run) - - # And walk over the list of files, either making a hard link (if - # os.link exists) to each one that doesn't already exist in its - # corresponding location under 'base_dir', or copying each file - # that's out-of-date in 'base_dir'. (Usually, all files will be - # out-of-date, because by default we blow away 'base_dir' when - # we're done making the distribution archives.) - - if hasattr(os, 'link'): # can make hard links on this system - link = 'hard' - msg = "making hard links in %s..." % base_dir - else: # nope, have to copy - link = None - msg = "copying files to %s..." % base_dir - - if not files: - log.warn("no files to distribute -- empty manifest?") - else: - log.info(msg) - for file in files: - if not os.path.isfile(file): - log.warn("'%s' not a regular file -- skipping", file) - else: - dest = os.path.join(base_dir, file) - self.copy_file(file, dest, link=link) - - self.distribution.metadata.write_pkg_info(base_dir) - - def make_distribution(self): - """Create the source distribution(s). First, we create the release - tree with 'make_release_tree()'; then, we create all required - archive files (according to 'self.formats') from the release tree. - Finally, we clean up by blowing away the release tree (unless - 'self.keep_temp' is true). The list of archive files created is - stored so it can be retrieved later by 'get_archive_files()'. - """ - # Don't warn about missing meta-data here -- should be (and is!) - # done elsewhere. - base_dir = self.distribution.get_fullname() - base_name = os.path.join(self.dist_dir, base_dir) - - self.make_release_tree(base_dir, self.filelist.files) - archive_files = [] # remember names of files we create - # tar archive must be created last to avoid overwrite and remove - if 'tar' in self.formats: - self.formats.append(self.formats.pop(self.formats.index('tar'))) - - for fmt in self.formats: - file = self.make_archive(base_name, fmt, base_dir=base_dir, - owner=self.owner, group=self.group) - archive_files.append(file) - self.distribution.dist_files.append(('sdist', '', file)) - - self.archive_files = archive_files - - if not self.keep_temp: - dir_util.remove_tree(base_dir, dry_run=self.dry_run) - - def get_archive_files(self): - """Return the list of archive files created when the command - was run, or None if the command hasn't run yet. - """ - return self.archive_files diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/upload.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/upload.py deleted file mode 100755 index 95e9fda..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/command/upload.py +++ /dev/null @@ -1,214 +0,0 @@ -""" -distutils.command.upload - -Implements the Distutils 'upload' subcommand (upload package to a package -index). 
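-
-(Editor's note: credentials come from ~/.pypirc via PyPIRCCommand, and the
-files to upload are taken from the distribution's dist_files list.)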
-""" - -import os -import io -import hashlib -from base64 import standard_b64encode -from urllib.request import urlopen, Request, HTTPError -from urllib.parse import urlparse -from distutils.errors import DistutilsError, DistutilsOptionError -from distutils.core import PyPIRCCommand -from distutils.spawn import spawn -from distutils import log - - -# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256) -# https://bugs.python.org/issue40698 -_FILE_CONTENT_DIGESTS = { - "md5_digest": getattr(hashlib, "md5", None), - "sha256_digest": getattr(hashlib, "sha256", None), - "blake2_256_digest": getattr(hashlib, "blake2b", None), -} - - -class upload(PyPIRCCommand): - - description = "upload binary package to PyPI" - - user_options = PyPIRCCommand.user_options + [ - ('sign', 's', - 'sign files to upload using gpg'), - ('identity=', 'i', 'GPG identity used to sign files'), - ] - - boolean_options = PyPIRCCommand.boolean_options + ['sign'] - - def initialize_options(self): - PyPIRCCommand.initialize_options(self) - self.username = '' - self.password = '' - self.show_response = 0 - self.sign = False - self.identity = None - - def finalize_options(self): - PyPIRCCommand.finalize_options(self) - if self.identity and not self.sign: - raise DistutilsOptionError( - "Must use --sign for --identity to have meaning" - ) - config = self._read_pypirc() - if config != {}: - self.username = config['username'] - self.password = config['password'] - self.repository = config['repository'] - self.realm = config['realm'] - - # getting the password from the distribution - # if previously set by the register command - if not self.password and self.distribution.password: - self.password = self.distribution.password - - def run(self): - if not self.distribution.dist_files: - msg = ("Must create and upload files in one command " - "(e.g. 
setup.py sdist upload)") - raise DistutilsOptionError(msg) - for command, pyversion, filename in self.distribution.dist_files: - self.upload_file(command, pyversion, filename) - - def upload_file(self, command, pyversion, filename): - # Makes sure the repository URL is compliant - schema, netloc, url, params, query, fragments = \ - urlparse(self.repository) - if params or query or fragments: - raise AssertionError("Incompatible url %s" % self.repository) - - if schema not in ('http', 'https'): - raise AssertionError("unsupported schema " + schema) - - # Sign if requested - if self.sign: - gpg_args = ["gpg", "--detach-sign", "-a", filename] - if self.identity: - gpg_args[2:2] = ["--local-user", self.identity] - spawn(gpg_args, - dry_run=self.dry_run) - - # Fill in the data - send all the meta-data in case we need to - # register a new release - f = open(filename,'rb') - try: - content = f.read() - finally: - f.close() - - meta = self.distribution.metadata - data = { - # action - ':action': 'file_upload', - 'protocol_version': '1', - - # identify release - 'name': meta.get_name(), - 'version': meta.get_version(), - - # file content - 'content': (os.path.basename(filename),content), - 'filetype': command, - 'pyversion': pyversion, - - # additional meta-data - 'metadata_version': '1.0', - 'summary': meta.get_description(), - 'home_page': meta.get_url(), - 'author': meta.get_contact(), - 'author_email': meta.get_contact_email(), - 'license': meta.get_licence(), - 'description': meta.get_long_description(), - 'keywords': meta.get_keywords(), - 'platform': meta.get_platforms(), - 'classifiers': meta.get_classifiers(), - 'download_url': meta.get_download_url(), - # PEP 314 - 'provides': meta.get_provides(), - 'requires': meta.get_requires(), - 'obsoletes': meta.get_obsoletes(), - } - - data['comment'] = '' - - # file content digests - for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items(): - if digest_cons is None: - continue - try: - data[digest_name] = digest_cons(content).hexdigest() - except ValueError: - # hash digest not available or blocked by security policy - pass - - if self.sign: - with open(filename + ".asc", "rb") as f: - data['gpg_signature'] = (os.path.basename(filename) + ".asc", - f.read()) - - # set up the authentication - user_pass = (self.username + ":" + self.password).encode('ascii') - # The exact encoding of the authentication string is debated. - # Anyway PyPI only accepts ascii for both username or password. 
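-        # Editor's illustration: 'fred:sekrit' becomes
-        # 'Basic ZnJlZDpzZWtyaXQ=' after the base64 step below.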
- auth = "Basic " + standard_b64encode(user_pass).decode('ascii') - - # Build up the MIME payload for the POST data - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = b'\r\n--' + boundary.encode('ascii') - end_boundary = sep_boundary + b'--\r\n' - body = io.BytesIO() - for key, value in data.items(): - title = '\r\nContent-Disposition: form-data; name="%s"' % key - # handle multiple entries for the same name - if not isinstance(value, list): - value = [value] - for value in value: - if type(value) is tuple: - title += '; filename="%s"' % value[0] - value = value[1] - else: - value = str(value).encode('utf-8') - body.write(sep_boundary) - body.write(title.encode('utf-8')) - body.write(b"\r\n\r\n") - body.write(value) - body.write(end_boundary) - body = body.getvalue() - - msg = "Submitting %s to %s" % (filename, self.repository) - self.announce(msg, log.INFO) - - # build the Request - headers = { - 'Content-type': 'multipart/form-data; boundary=%s' % boundary, - 'Content-length': str(len(body)), - 'Authorization': auth, - } - - request = Request(self.repository, data=body, - headers=headers) - # send the data - try: - result = urlopen(request) - status = result.getcode() - reason = result.msg - except HTTPError as e: - status = e.code - reason = e.msg - except OSError as e: - self.announce(str(e), log.ERROR) - raise - - if status == 200: - self.announce('Server response (%s): %s' % (status, reason), - log.INFO) - if self.show_response: - text = self._read_pypi_response(result) - msg = '\n'.join(('-' * 75, text, '-' * 75)) - self.announce(msg, log.INFO) - else: - msg = 'Upload failed (%s): %s' % (status, reason) - self.announce(msg, log.ERROR) - raise DistutilsError(msg) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/config.py deleted file mode 100755 index 2171abd..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/config.py +++ /dev/null @@ -1,130 +0,0 @@ -"""distutils.pypirc - -Provides the PyPIRCCommand class, the base class for the command classes -that uses .pypirc in the distutils.command package. 
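-
-(Editor's note: the file is read from $HOME/.pypirc and, when written by
-_store_pypirc(), created with mode 0600 -- see the helpers below.)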
-""" -import os -from configparser import RawConfigParser - -from distutils.cmd import Command - -DEFAULT_PYPIRC = """\ -[distutils] -index-servers = - pypi - -[pypi] -username:%s -password:%s -""" - -class PyPIRCCommand(Command): - """Base command that knows how to handle the .pypirc file - """ - DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' - DEFAULT_REALM = 'pypi' - repository = None - realm = None - - user_options = [ - ('repository=', 'r', - "url of repository [default: %s]" % \ - DEFAULT_REPOSITORY), - ('show-response', None, - 'display full response text from server')] - - boolean_options = ['show-response'] - - def _get_rc_file(self): - """Returns rc file path.""" - return os.path.join(os.path.expanduser('~'), '.pypirc') - - def _store_pypirc(self, username, password): - """Creates a default .pypirc file.""" - rc = self._get_rc_file() - with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: - f.write(DEFAULT_PYPIRC % (username, password)) - - def _read_pypirc(self): - """Reads the .pypirc file.""" - rc = self._get_rc_file() - if os.path.exists(rc): - self.announce('Using PyPI login from %s' % rc) - repository = self.repository or self.DEFAULT_REPOSITORY - - config = RawConfigParser() - config.read(rc) - sections = config.sections() - if 'distutils' in sections: - # let's get the list of servers - index_servers = config.get('distutils', 'index-servers') - _servers = [server.strip() for server in - index_servers.split('\n') - if server.strip() != ''] - if _servers == []: - # nothing set, let's try to get the default pypi - if 'pypi' in sections: - _servers = ['pypi'] - else: - # the file is not properly defined, returning - # an empty dict - return {} - for server in _servers: - current = {'server': server} - current['username'] = config.get(server, 'username') - - # optional params - for key, default in (('repository', - self.DEFAULT_REPOSITORY), - ('realm', self.DEFAULT_REALM), - ('password', None)): - if config.has_option(server, key): - current[key] = config.get(server, key) - else: - current[key] = default - - # work around people having "repository" for the "pypi" - # section of their config set to the HTTP (rather than - # HTTPS) URL - if (server == 'pypi' and - repository in (self.DEFAULT_REPOSITORY, 'pypi')): - current['repository'] = self.DEFAULT_REPOSITORY - return current - - if (current['server'] == repository or - current['repository'] == repository): - return current - elif 'server-login' in sections: - # old format - server = 'server-login' - if config.has_option(server, 'repository'): - repository = config.get(server, 'repository') - else: - repository = self.DEFAULT_REPOSITORY - return {'username': config.get(server, 'username'), - 'password': config.get(server, 'password'), - 'repository': repository, - 'server': server, - 'realm': self.DEFAULT_REALM} - - return {} - - def _read_pypi_response(self, response): - """Read and decode a PyPI HTTP response.""" - import cgi - content_type = response.getheader('content-type', 'text/plain') - encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii') - return response.read().decode(encoding) - - def initialize_options(self): - """Initialize options.""" - self.repository = None - self.realm = None - self.show_response = 0 - - def finalize_options(self): - """Finalizes options.""" - if self.repository is None: - self.repository = self.DEFAULT_REPOSITORY - if self.realm is None: - self.realm = self.DEFAULT_REALM diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/core.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/core.py deleted file mode 100755 index f43888e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/core.py +++ /dev/null @@ -1,249 +0,0 @@ -"""distutils.core - -The only module that needs to be imported to use the Distutils; provides -the 'setup' function (which is to be called from the setup script). Also -indirectly provides the Distribution and Command classes, although they are -really defined in distutils.dist and distutils.cmd. -""" - -import os -import sys -import tokenize - -from distutils.debug import DEBUG -from distutils.errors import * - -# Mainly import these so setup scripts can "from distutils.core import" them. -from distutils.dist import Distribution -from distutils.cmd import Command -from distutils.config import PyPIRCCommand -from distutils.extension import Extension - -# This is a barebones help message generated displayed when the user -# runs the setup script with no arguments at all. More useful help -# is generated with various --help options: global help, list commands, -# and per-command help. -USAGE = """\ -usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] - or: %(script)s --help [cmd1 cmd2 ...] - or: %(script)s --help-commands - or: %(script)s cmd --help -""" - -def gen_usage (script_name): - script = os.path.basename(script_name) - return USAGE % vars() - - -# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'. -_setup_stop_after = None -_setup_distribution = None - -# Legal keyword arguments for the setup() function -setup_keywords = ('distclass', 'script_name', 'script_args', 'options', - 'name', 'version', 'author', 'author_email', - 'maintainer', 'maintainer_email', 'url', 'license', - 'description', 'long_description', 'keywords', - 'platforms', 'classifiers', 'download_url', - 'requires', 'provides', 'obsoletes', - ) - -# Legal keyword arguments for the Extension constructor -extension_keywords = ('name', 'sources', 'include_dirs', - 'define_macros', 'undef_macros', - 'library_dirs', 'libraries', 'runtime_library_dirs', - 'extra_objects', 'extra_compile_args', 'extra_link_args', - 'swig_opts', 'export_symbols', 'depends', 'language') - -def setup (**attrs): - """The gateway to the Distutils: do everything your setup script needs - to do, in a highly flexible and user-driven way. Briefly: create a - Distribution instance; find and parse config files; parse the command - line; run each Distutils command found there, customized by the options - supplied to 'setup()' (as keyword arguments), in config files, and on - the command line. - - The Distribution instance might be an instance of a class supplied via - the 'distclass' keyword argument to 'setup'; if no such class is - supplied, then the Distribution class (in dist.py) is instantiated. - All other arguments to 'setup' (except for 'cmdclass') are used to set - attributes of the Distribution instance. - - The 'cmdclass' argument, if supplied, is a dictionary mapping command - names to command classes. Each command encountered on the command line - will be turned into a command class, which is in turn instantiated; any - class found in 'cmdclass' is used in place of the default, which is - (for command 'foo_bar') class 'foo_bar' in module - 'distutils.command.foo_bar'. 
The command class must provide a - 'user_options' attribute which is a list of option specifiers for - 'distutils.fancy_getopt'. Any command-line options between the current - and the next command are used to set attributes of the current command - object. - - When the entire command-line has been successfully parsed, calls the - 'run()' method on each command object in turn. This method will be - driven entirely by the Distribution object (which each command object - has a reference to, thanks to its constructor), and the - command-specific options that became attributes of each command - object. - """ - - global _setup_stop_after, _setup_distribution - - # Determine the distribution class -- either caller-supplied or - # our Distribution (see below). - klass = attrs.get('distclass') - if klass: - del attrs['distclass'] - else: - klass = Distribution - - if 'script_name' not in attrs: - attrs['script_name'] = os.path.basename(sys.argv[0]) - if 'script_args' not in attrs: - attrs['script_args'] = sys.argv[1:] - - # Create the Distribution instance, using the remaining arguments - # (ie. everything except distclass) to initialize it - try: - _setup_distribution = dist = klass(attrs) - except DistutilsSetupError as msg: - if 'name' not in attrs: - raise SystemExit("error in setup command: %s" % msg) - else: - raise SystemExit("error in %s setup command: %s" % \ - (attrs['name'], msg)) - - if _setup_stop_after == "init": - return dist - - # Find and parse the config file(s): they will override options from - # the setup script, but be overridden by the command line. - dist.parse_config_files() - - if DEBUG: - print("options (after parsing config files):") - dist.dump_option_dicts() - - if _setup_stop_after == "config": - return dist - - # Parse the command line and override config files; any - # command-line errors are the end user's fault, so turn them into - # SystemExit to suppress tracebacks. - try: - ok = dist.parse_command_line() - except DistutilsArgError as msg: - raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg) - - if DEBUG: - print("options (after parsing command line):") - dist.dump_option_dicts() - - if _setup_stop_after == "commandline": - return dist - - # And finally, run all the commands found on the command line. - if ok: - return run_commands(dist) - - return dist - -# setup () - - -def run_commands (dist): - """Given a Distribution object run all the commands, - raising ``SystemExit`` errors in the case of failure. - - This function assumes that either ``sys.argv`` or ``dist.script_args`` - is already set accordingly. - """ - try: - dist.run_commands() - except KeyboardInterrupt: - raise SystemExit("interrupted") - except OSError as exc: - if DEBUG: - sys.stderr.write("error: %s\n" % (exc,)) - raise - else: - raise SystemExit("error: %s" % (exc,)) - - except (DistutilsError, - CCompilerError) as msg: - if DEBUG: - raise - else: - raise SystemExit("error: " + str(msg)) - - return dist - - -def run_setup (script_name, script_args=None, stop_after="run"): - """Run a setup script in a somewhat controlled environment, and - return the Distribution instance that drives things. This is useful - if you need to find out the distribution meta-data (passed as - keyword args from 'script' to 'setup()', or the contents of the - config files or command-line. - - 'script_name' is a file that will be read and run with 'exec()'; - 'sys.argv[0]' will be replaced with 'script' for the duration of the - call. 
'script_args' is a list of strings; if supplied,
-    'sys.argv[1:]' will be replaced by 'script_args' for the duration of
-    the call.
-
-    'stop_after' tells 'setup()' when to stop processing; possible
-    values:
-      init
-        stop after the Distribution instance has been created and
-        populated with the keyword arguments to 'setup()'
-      config
-        stop after config files have been parsed (and their data
-        stored in the Distribution instance)
-      commandline
-        stop after the command-line ('sys.argv[1:]' or 'script_args')
-        have been parsed (and the data stored in the Distribution)
-      run [default]
-        stop after all commands have been run (the same as if 'setup()'
-        had been called in the usual way)
-
-    Returns the Distribution instance, which provides all information
-    used to drive the Distutils.
-    """
-    if stop_after not in ('init', 'config', 'commandline', 'run'):
-        raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
-
-    global _setup_stop_after, _setup_distribution
-    _setup_stop_after = stop_after
-
-    save_argv = sys.argv.copy()
-    g = {'__file__': script_name, '__name__': '__main__'}
-    try:
-        try:
-            sys.argv[0] = script_name
-            if script_args is not None:
-                sys.argv[1:] = script_args
-            # tokenize.open supports automatic encoding detection
-            with tokenize.open(script_name) as f:
-                code = f.read().replace(r'\r\n', r'\n')
-                exec(code, g)
-        finally:
-            sys.argv = save_argv
-            _setup_stop_after = None
-    except SystemExit:
-        # Hmm, should we do something if exiting with a non-zero code
-        # (ie. error)?
-        pass
-
-    if _setup_distribution is None:
-        raise RuntimeError(("'distutils.core.setup()' was never called -- "
-                           "perhaps '%s' is not a Distutils setup script?") % \
-              script_name)
-
-    # I wonder if the setup script's namespace -- g and l -- would be of
-    # any interest to callers?
-    #print "_setup_distribution:", _setup_distribution
-    return _setup_distribution
-
-# run_setup ()
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cygwinccompiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cygwinccompiler.py
deleted file mode 100755
index c5c86d8..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/cygwinccompiler.py
+++ /dev/null
@@ -1,362 +0,0 @@
-"""distutils.cygwinccompiler
-
-Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
-handles the Cygwin port of the GNU C compiler to Windows.  It also contains
-the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
-cygwin in no-cygwin mode).
-"""
-
-# problems:
-#
-# * if you use a msvc compiled python version (1.5.2)
-#   1. you have to insert a __GNUC__ section in its config.h
-#   2. you have to generate an import library for its dll
-#      - create a def-file for python??.dll
-#      - create an import library using
-#             dlltool --dllname python15.dll --def python15.def \
-#                       --output-lib libpython15.a
-#
-#   see also http://starship.python.net/crew/kernr/mingw32/Notes.html
-#
-# * We put export_symbols in a def-file, and don't use
-#   --export-all-symbols because it doesn't work reliably in some
-#   tested configurations.  And because other Windows compilers also
-#   need their symbols specified, this is no serious problem.
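# A short sketch of the run_setup() entry point above, assuming a hypothetical
# script at 'examples/setup.py'; stop_after='init' returns the Distribution
# before any config parsing or command execution.
from distutils.core import run_setup

dist = run_setup('examples/setup.py', script_args=[], stop_after='init')
print(dist.get_name(), dist.get_version())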
-# -# tested configurations: -# -# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works -# (after patching python's config.h and for C++ some other include files) -# see also http://starship.python.net/crew/kernr/mingw32/Notes.html -# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works -# (ld doesn't support -shared, so we use dllwrap) -# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now -# - its dllwrap doesn't work, there is a bug in binutils 2.10.90 -# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html -# - using gcc -mdll instead dllwrap doesn't work without -static because -# it tries to link against dlls instead their import libraries. (If -# it finds the dll first.) -# By specifying -static we force ld to link against the import libraries, -# this is windows standard and there are normally not the necessary symbols -# in the dlls. -# *** only the version of June 2000 shows these problems -# * cygwin gcc 3.2/ld 2.13.90 works -# (ld supports -shared) -# * mingw gcc 3.2/ld 2.13 works -# (ld supports -shared) -# * llvm-mingw with Clang 11 works -# (lld supports -shared) - -import os -import sys -import copy -import shlex -import warnings -from subprocess import check_output - -from distutils.unixccompiler import UnixCCompiler -from distutils.file_util import write_file -from distutils.errors import (DistutilsExecError, CCompilerError, - CompileError, UnknownFileError) -from distutils.version import LooseVersion, suppress_known_deprecation - -def get_msvcr(): - """Include the appropriate MSVC runtime library if Python was built - with MSVC 7.0 or later. - """ - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = sys.version[msc_pos+6:msc_pos+10] - if msc_ver == '1300': - # MSVC 7.0 - return ['msvcr70'] - elif msc_ver == '1310': - # MSVC 7.1 - return ['msvcr71'] - elif msc_ver == '1400': - # VS2005 / MSVC 8.0 - return ['msvcr80'] - elif msc_ver == '1500': - # VS2008 / MSVC 9.0 - return ['msvcr90'] - elif msc_ver == '1600': - # VS2010 / MSVC 10.0 - return ['msvcr100'] - elif msc_ver == '1700': - # VS2012 / MSVC 11.0 - return ['msvcr110'] - elif msc_ver == '1800': - # VS2013 / MSVC 12.0 - return ['msvcr120'] - elif 1900 <= int(msc_ver) < 2000: - # VS2015 / MSVC 14.0 - return ['ucrt', 'vcruntime140'] - else: - raise ValueError("Unknown MS Compiler version %s " % msc_ver) - - -class CygwinCCompiler(UnixCCompiler): - """ Handles the Cygwin port of the GNU C compiler to Windows. - """ - compiler_type = 'cygwin' - obj_extension = ".o" - static_lib_extension = ".a" - shared_lib_extension = ".dll" - static_lib_format = "lib%s%s" - shared_lib_format = "%s%s" - exe_extension = ".exe" - - def __init__(self, verbose=0, dry_run=0, force=0): - - super().__init__(verbose, dry_run, force) - - status, details = check_config_h() - self.debug_print("Python's GCC status: %s (details: %s)" % - (status, details)) - if status is not CONFIG_H_OK: - self.warn( - "Python's pyconfig.h doesn't seem to support your compiler. " - "Reason: %s. " - "Compiling may fail because of undefined preprocessor macros." 
- % details) - - self.cc = os.environ.get('CC', 'gcc') - self.cxx = os.environ.get('CXX', 'g++') - - self.linker_dll = self.cc - shared_option = "-shared" - - self.set_executables(compiler='%s -mcygwin -O -Wall' % self.cc, - compiler_so='%s -mcygwin -mdll -O -Wall' % self.cc, - compiler_cxx='%s -mcygwin -O -Wall' % self.cxx, - linker_exe='%s -mcygwin' % self.cc, - linker_so=('%s -mcygwin %s' % - (self.linker_dll, shared_option))) - - # Include the appropriate MSVC runtime library if Python was built - # with MSVC 7.0 or later. - self.dll_libraries = get_msvcr() - - @property - def gcc_version(self): - # Older numpy dependend on this existing to check for ancient - # gcc versions. This doesn't make much sense with clang etc so - # just hardcode to something recent. - # https://github.com/numpy/numpy/pull/20333 - warnings.warn( - "gcc_version attribute of CygwinCCompiler is deprecated. " - "Instead of returning actual gcc version a fixed value 11.2.0 is returned.", - DeprecationWarning, - stacklevel=2, - ) - with suppress_known_deprecation(): - return LooseVersion("11.2.0") - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compiles the source by spawning GCC and windres if needed.""" - if ext == '.rc' or ext == '.res': - # gcc needs '.res' and '.rc' compiled to object files !!! - try: - self.spawn(["windres", "-i", src, "-o", obj]) - except DistutilsExecError as msg: - raise CompileError(msg) - else: # for other files use the C-compiler - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - def link(self, target_desc, objects, output_filename, output_dir=None, - libraries=None, library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - """Link the objects.""" - # use separate copies, so we can modify the lists - extra_preargs = copy.copy(extra_preargs or []) - libraries = copy.copy(libraries or []) - objects = copy.copy(objects or []) - - # Additional libraries - libraries.extend(self.dll_libraries) - - # handle export symbols by creating a def-file - # with executables this only works with gcc/ld as linker - if ((export_symbols is not None) and - (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): - # (The linker doesn't do anything if output is up-to-date. - # So it would probably better to check if we really need this, - # but for this we had to insert some unchanged parts of - # UnixCCompiler, and this is not what we want.) 
- - # we want to put some files in the same directory as the - # object files are, build_temp doesn't help much - # where are the object files - temp_dir = os.path.dirname(objects[0]) - # name of dll to give the helper files the same base name - (dll_name, dll_extension) = os.path.splitext( - os.path.basename(output_filename)) - - # generate the filenames for these files - def_file = os.path.join(temp_dir, dll_name + ".def") - lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a") - - # Generate .def file - contents = [ - "LIBRARY %s" % os.path.basename(output_filename), - "EXPORTS"] - for sym in export_symbols: - contents.append(sym) - self.execute(write_file, (def_file, contents), - "writing %s" % def_file) - - # next add options for def-file and to creating import libraries - - # doesn't work: bfd_close build\...\libfoo.a: Invalid operation - #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file]) - # for gcc/ld the def-file is specified as any object files - objects.append(def_file) - - #end: if ((export_symbols is not None) and - # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): - - # who wants symbols and a many times larger output file - # should explicitly switch the debug mode on - # otherwise we let ld strip the output file - # (On my machine: 10KiB < stripped_file < ??100KiB - # unstripped_file = stripped_file + XXX KiB - # ( XXX=254 for a typical python extension)) - if not debug: - extra_preargs.append("-s") - - UnixCCompiler.link(self, target_desc, objects, output_filename, - output_dir, libraries, library_dirs, - runtime_library_dirs, - None, # export_symbols, we do this in our def-file - debug, extra_preargs, extra_postargs, build_temp, - target_lang) - - # -- Miscellaneous methods ----------------------------------------- - - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """Adds supports for rc and res files.""" - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - base, ext = os.path.splitext(os.path.normcase(src_name)) - if ext not in (self.src_extensions + ['.rc','.res']): - raise UnknownFileError("unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext in ('.res', '.rc'): - # these need to be compiled to object files - obj_names.append (os.path.join(output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join(output_dir, - base + self.obj_extension)) - return obj_names - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(CygwinCCompiler): - """ Handles the Mingw32 port of the GNU C compiler to Windows. 
- """ - compiler_type = 'mingw32' - - def __init__(self, verbose=0, dry_run=0, force=0): - - super().__init__ (verbose, dry_run, force) - - shared_option = "-shared" - - if is_cygwincc(self.cc): - raise CCompilerError( - 'Cygwin gcc cannot be used with --compiler=mingw32') - - self.set_executables(compiler='%s -O -Wall' % self.cc, - compiler_so='%s -mdll -O -Wall' % self.cc, - compiler_cxx='%s -O -Wall' % self.cxx, - linker_exe='%s' % self.cc, - linker_so='%s %s' - % (self.linker_dll, shared_option)) - - # Maybe we should also append -mthreads, but then the finished - # dlls need another dll (mingwm10.dll see Mingw32 docs) - # (-mthreads: Support thread-safe exception handling on `Mingw32') - - # no additional libraries needed - self.dll_libraries=[] - - # Include the appropriate MSVC runtime library if Python was built - # with MSVC 7.0 or later. - self.dll_libraries = get_msvcr() - -# Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using an unmodified -# version. - -CONFIG_H_OK = "ok" -CONFIG_H_NOTOK = "not ok" -CONFIG_H_UNCERTAIN = "uncertain" - -def check_config_h(): - """Check if the current Python installation appears amenable to building - extensions with GCC. - - Returns a tuple (status, details), where 'status' is one of the following - constants: - - - CONFIG_H_OK: all is well, go ahead and compile - - CONFIG_H_NOTOK: doesn't look good - - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h - - 'details' is a human-readable string explaining the situation. - - Note there are two ways to conclude "OK": either 'sys.version' contains - the string "GCC" (implying that this Python was built with GCC), or the - installed "pyconfig.h" contains the string "__GNUC__". - """ - - # XXX since this function also checks sys.version, it's not strictly a - # "pyconfig.h" check -- should probably be renamed... - - from distutils import sysconfig - - # if sys.version contains GCC then python was compiled with GCC, and the - # pyconfig.h file should be OK - if "GCC" in sys.version: - return CONFIG_H_OK, "sys.version mentions 'GCC'" - - # Clang would also work - if "Clang" in sys.version: - return CONFIG_H_OK, "sys.version mentions 'Clang'" - - # let's see if __GNUC__ is mentioned in python.h - fn = sysconfig.get_config_h_filename() - try: - config_h = open(fn) - try: - if "__GNUC__" in config_h.read(): - return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn - else: - return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn - finally: - config_h.close() - except OSError as exc: - return (CONFIG_H_UNCERTAIN, - "couldn't read '%s': %s" % (fn, exc.strerror)) - -def is_cygwincc(cc): - '''Try to determine if the compiler that would be used is from cygwin.''' - out_string = check_output(shlex.split(cc) + ['-dumpmachine']) - return out_string.strip().endswith(b'cygwin') - - -get_versions = None -""" -A stand-in for the previous get_versions() function to prevent failures -when monkeypatched. See pypa/setuptools#2969. -""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/debug.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/debug.py deleted file mode 100755 index daf1660..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/debug.py +++ /dev/null @@ -1,5 +0,0 @@ -import os - -# If DISTUTILS_DEBUG is anything other than the empty string, we run in -# debug mode. 
-DEBUG = os.environ.get('DISTUTILS_DEBUG') diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dep_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dep_util.py deleted file mode 100755 index d74f5e4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dep_util.py +++ /dev/null @@ -1,92 +0,0 @@ -"""distutils.dep_util - -Utility functions for simple, timestamp-based dependency of files -and groups of files; also, function based entirely on such -timestamp dependency analysis.""" - -import os -from distutils.errors import DistutilsFileError - - -def newer (source, target): - """Return true if 'source' exists and is more recently modified than - 'target', or if 'source' exists and 'target' doesn't. Return false if - both exist and 'target' is the same age or younger than 'source'. - Raise DistutilsFileError if 'source' does not exist. - """ - if not os.path.exists(source): - raise DistutilsFileError("file '%s' does not exist" % - os.path.abspath(source)) - if not os.path.exists(target): - return 1 - - from stat import ST_MTIME - mtime1 = os.stat(source)[ST_MTIME] - mtime2 = os.stat(target)[ST_MTIME] - - return mtime1 > mtime2 - -# newer () - - -def newer_pairwise (sources, targets): - """Walk two filename lists in parallel, testing if each source is newer - than its corresponding target. Return a pair of lists (sources, - targets) where source is newer than target, according to the semantics - of 'newer()'. - """ - if len(sources) != len(targets): - raise ValueError("'sources' and 'targets' must be same length") - - # build a pair of lists (sources, targets) where source is newer - n_sources = [] - n_targets = [] - for i in range(len(sources)): - if newer(sources[i], targets[i]): - n_sources.append(sources[i]) - n_targets.append(targets[i]) - - return (n_sources, n_targets) - -# newer_pairwise () - - -def newer_group (sources, target, missing='error'): - """Return true if 'target' is out-of-date with respect to any file - listed in 'sources'. In other words, if 'target' exists and is newer - than every file in 'sources', return false; otherwise return true. - 'missing' controls what we do when a source file is missing; the - default ("error") is to blow up with an OSError from inside 'stat()'; - if it is "ignore", we silently drop any missing source files; if it is - "newer", any missing source files make us assume that 'target' is - out-of-date (this is handy in "dry-run" mode: it'll make you pretend to - carry out commands that wouldn't work because inputs are missing, but - that doesn't matter because you're not actually going to run the - commands). - """ - # If the target doesn't even exist, then it's definitely out-of-date. - if not os.path.exists(target): - return 1 - - # Otherwise we have to find out the hard way: if *any* source file - # is more recent than 'target', then 'target' is out-of-date and - # we can immediately return true. If we fall through to the end - # of the loop, then 'target' is up-to-date and we return false. 
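# An illustrative aside (hedged, scratch paths only) on the dep_util helpers:
import os
import tempfile
from distutils.dep_util import newer, newer_group

d = tempfile.mkdtemp()
src = os.path.join(d, 'src.c')
tgt = os.path.join(d, 'out.o')
open(src, 'w').close()
print(newer(src, tgt))           # 1: target does not exist yet
open(tgt, 'w').close()
t = os.stat(tgt).st_mtime + 1
os.utime(src, (t, t))            # make the source strictly newer
print(newer(src, tgt))           # True: source modified after target
print(newer_group([src], tgt))   # 1: group verdict matches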
-    from stat import ST_MTIME
-    target_mtime = os.stat(target)[ST_MTIME]
-    for source in sources:
-        if not os.path.exists(source):
-            if missing == 'error':      # blow up when we stat() the file
-                pass
-            elif missing == 'ignore':   # missing source dropped from
-                continue                #  target's dependency list
-            elif missing == 'newer':    # missing source means target is
-                return 1                #  out-of-date
-
-        source_mtime = os.stat(source)[ST_MTIME]
-        if source_mtime > target_mtime:
-            return 1
-    else:
-        return 0
-
-# newer_group ()
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dir_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dir_util.py
deleted file mode 100755
index d5cd8e3..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dir_util.py
+++ /dev/null
@@ -1,210 +0,0 @@
-"""distutils.dir_util
-
-Utility functions for manipulating directories and directory trees."""
-
-import os
-import errno
-from distutils.errors import DistutilsFileError, DistutilsInternalError
-from distutils import log
-
-# cache used by mkpath() -- in addition to cheapening redundant calls,
-# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
-_path_created = {}
-
-# I don't use os.makedirs because a) it's new to Python 1.5.2, and
-# b) it blows up if the directory already exists (I want to silently
-# succeed in that case).
-def mkpath(name, mode=0o777, verbose=1, dry_run=0):
-    """Create a directory and any missing ancestor directories.
-
-    If the directory already exists (or if 'name' is the empty string, which
-    means the current directory, which of course exists), then do nothing.
-    Raise DistutilsFileError if unable to create some directory along the way
-    (eg. some sub-path exists, but is a file rather than a directory).
-    If 'verbose' is true, print a one-line summary of each mkdir to stdout.
-    Return the list of directories actually created.
-    """
-
-    global _path_created
-
-    # Detect a common bug -- name is None
-    if not isinstance(name, str):
-        raise DistutilsInternalError(
-              "mkpath: 'name' must be a string (got %r)" % (name,))
-
-    # XXX what's the better way to handle verbosity?  print as we create
-    # each directory in the path (the current behaviour), or only announce
-    # the creation of the whole path?  (quite easy to do the latter since
-    # we're not using a recursive algorithm)
-
-    name = os.path.normpath(name)
-    created_dirs = []
-    if os.path.isdir(name) or name == '':
-        return created_dirs
-    if _path_created.get(os.path.abspath(name)):
-        return created_dirs
-
-    (head, tail) = os.path.split(name)
-    tails = [tail]                   # stack of lone dirs to create
-    while head and tail and not os.path.isdir(head):
-        (head, tail) = os.path.split(head)
-        tails.insert(0, tail)        # push next higher dir onto stack
-
-    # now 'head' contains the deepest directory that already exists
-    # (that is, the child of 'head' in 'name' is the highest directory
-    # that does *not* exist)
-    for d in tails:
-        #print "head = %s, d = %s: " % (head, d),
-        head = os.path.join(head, d)
-        abs_head = os.path.abspath(head)
-
-        if _path_created.get(abs_head):
-            continue
-
-        if verbose >= 1:
-            log.info("creating %s", head)
-
-        if not dry_run:
-            try:
-                os.mkdir(head, mode)
-            except OSError as exc:
-                if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
-                    raise DistutilsFileError(
-                          "could not create '%s': %s" % (head, exc.args[-1]))
-            created_dirs.append(head)
-
-        _path_created[abs_head] = 1
-    return created_dirs
-
-def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
-    """Create all the empty directories under 'base_dir' needed to put 'files'
-    there.
-
-    'base_dir' is just the name of a directory which doesn't necessarily
-    exist yet; 'files' is a list of filenames to be interpreted relative to
-    'base_dir'.  'base_dir' + the directory portion of every file in 'files'
-    will be created if it doesn't already exist.  'mode', 'verbose' and
-    'dry_run' flags are as for 'mkpath()'.
-    """
-    # First get the list of directories to create
-    need_dir = set()
-    for file in files:
-        need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
-
-    # Now create them
-    for dir in sorted(need_dir):
-        mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
-
-def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
-              preserve_symlinks=0, update=0, verbose=1, dry_run=0):
-    """Copy an entire directory tree 'src' to a new location 'dst'.
-
-    Both 'src' and 'dst' must be directory names.  If 'src' is not a
-    directory, raise DistutilsFileError.  If 'dst' does not exist, it is
-    created with 'mkpath()'.  The end result of the copy is that every
-    file in 'src' is copied to 'dst', and directories under 'src' are
-    recursively copied to 'dst'.  Return the list of files that were
-    copied or might have been copied, using their output name.  The
-    return value is unaffected by 'update' or 'dry_run': it is simply
-    the list of all files under 'src', with the names changed to be
-    under 'dst'.
-
-    'preserve_mode' and 'preserve_times' are the same as for
-    'copy_file'; note that they only apply to regular files, not to
-    directories.  If 'preserve_symlinks' is true, symlinks will be
-    copied as symlinks (on platforms that support them!); otherwise
-    (the default), the destination of the symlink will be copied.
-    'update' and 'verbose' are the same as for 'copy_file'.
- """ - from distutils.file_util import copy_file - - if not dry_run and not os.path.isdir(src): - raise DistutilsFileError( - "cannot copy tree '%s': not a directory" % src) - try: - names = os.listdir(src) - except OSError as e: - if dry_run: - names = [] - else: - raise DistutilsFileError( - "error listing files in '%s': %s" % (src, e.strerror)) - - if not dry_run: - mkpath(dst, verbose=verbose) - - outputs = [] - - for n in names: - src_name = os.path.join(src, n) - dst_name = os.path.join(dst, n) - - if n.startswith('.nfs'): - # skip NFS rename files - continue - - if preserve_symlinks and os.path.islink(src_name): - link_dest = os.readlink(src_name) - if verbose >= 1: - log.info("linking %s -> %s", dst_name, link_dest) - if not dry_run: - os.symlink(link_dest, dst_name) - outputs.append(dst_name) - - elif os.path.isdir(src_name): - outputs.extend( - copy_tree(src_name, dst_name, preserve_mode, - preserve_times, preserve_symlinks, update, - verbose=verbose, dry_run=dry_run)) - else: - copy_file(src_name, dst_name, preserve_mode, - preserve_times, update, verbose=verbose, - dry_run=dry_run) - outputs.append(dst_name) - - return outputs - -def _build_cmdtuple(path, cmdtuples): - """Helper for remove_tree().""" - for f in os.listdir(path): - real_f = os.path.join(path,f) - if os.path.isdir(real_f) and not os.path.islink(real_f): - _build_cmdtuple(real_f, cmdtuples) - else: - cmdtuples.append((os.remove, real_f)) - cmdtuples.append((os.rmdir, path)) - -def remove_tree(directory, verbose=1, dry_run=0): - """Recursively remove an entire directory tree. - - Any errors are ignored (apart from being reported to stdout if 'verbose' - is true). - """ - global _path_created - - if verbose >= 1: - log.info("removing '%s' (and everything under it)", directory) - if dry_run: - return - cmdtuples = [] - _build_cmdtuple(directory, cmdtuples) - for cmd in cmdtuples: - try: - cmd[0](cmd[1]) - # remove dir from cache if it's already there - abspath = os.path.abspath(cmd[1]) - if abspath in _path_created: - del _path_created[abspath] - except OSError as exc: - log.warn("error removing %s: %s", directory, exc) - -def ensure_relative(path): - """Take the full path 'path', and make it a relative path. - - This is useful to make 'path' the second argument to os.path.join(). - """ - drive, path = os.path.splitdrive(path) - if path[0:1] == os.sep: - path = drive + path[1:] - return path diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dist.py deleted file mode 100755 index 37db4d6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/dist.py +++ /dev/null @@ -1,1257 +0,0 @@ -"""distutils.dist - -Provides the Distribution class, which represents the module distribution -being built/installed/distributed. -""" - -import sys -import os -import re -from email import message_from_file - -try: - import warnings -except ImportError: - warnings = None - -from distutils.errors import * -from distutils.fancy_getopt import FancyGetopt, translate_longopt -from distutils.util import check_environ, strtobool, rfc822_escape -from distutils import log -from distutils.debug import DEBUG - -# Regex to define acceptable Distutils command names. This is not *quite* -# the same as a Python NAME -- I don't allow leading underscores. 
The fact -# that they're very similar is no coincidence; the default naming scheme is -# to look for a Python module named after the command. -command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$') - - -def _ensure_list(value, fieldname): - if isinstance(value, str): - # a string containing comma separated values is okay. It will - # be converted to a list by Distribution.finalize_options(). - pass - elif not isinstance(value, list): - # passing a tuple or an iterator perhaps, warn and convert - typename = type(value).__name__ - msg = "Warning: '{fieldname}' should be a list, got type '{typename}'" - msg = msg.format(**locals()) - log.log(log.WARN, msg) - value = list(value) - return value - - -class Distribution: - """The core of the Distutils. Most of the work hiding behind 'setup' - is really done within a Distribution instance, which farms the work out - to the Distutils commands specified on the command line. - - Setup scripts will almost never instantiate Distribution directly, - unless the 'setup()' function is totally inadequate to their needs. - However, it is conceivable that a setup script might wish to subclass - Distribution for some specialized purpose, and then pass the subclass - to 'setup()' as the 'distclass' keyword argument. If so, it is - necessary to respect the expectations that 'setup' has of Distribution. - See the code for 'setup()', in core.py, for details. - """ - - # 'global_options' describes the command-line options that may be - # supplied to the setup script prior to any actual commands. - # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of - # these global options. This list should be kept to a bare minimum, - # since every global option is also valid as a command option -- and we - # don't want to pollute the commands with too many options that they - # have minimal control over. - # The fourth entry for verbose means that it can be repeated. - global_options = [ - ('verbose', 'v', "run verbosely (default)", 1), - ('quiet', 'q', "run quietly (turns verbosity off)"), - ('dry-run', 'n', "don't actually do anything"), - ('help', 'h', "show detailed help message"), - ('no-user-cfg', None, - 'ignore pydistutils.cfg in your home directory'), - ] - - # 'common_usage' is a short (2-3 line) string describing the common - # usage of the setup script. 
- common_usage = """\ -Common commands: (see '--help-commands' for more) - - setup.py build will build the package underneath 'build/' - setup.py install will install the package -""" - - # options that are not propagated to the commands - display_options = [ - ('help-commands', None, - "list all available commands"), - ('name', None, - "print package name"), - ('version', 'V', - "print package version"), - ('fullname', None, - "print -"), - ('author', None, - "print the author's name"), - ('author-email', None, - "print the author's email address"), - ('maintainer', None, - "print the maintainer's name"), - ('maintainer-email', None, - "print the maintainer's email address"), - ('contact', None, - "print the maintainer's name if known, else the author's"), - ('contact-email', None, - "print the maintainer's email address if known, else the author's"), - ('url', None, - "print the URL for this package"), - ('license', None, - "print the license of the package"), - ('licence', None, - "alias for --license"), - ('description', None, - "print the package description"), - ('long-description', None, - "print the long package description"), - ('platforms', None, - "print the list of platforms"), - ('classifiers', None, - "print the list of classifiers"), - ('keywords', None, - "print the list of keywords"), - ('provides', None, - "print the list of packages/modules provided"), - ('requires', None, - "print the list of packages/modules required"), - ('obsoletes', None, - "print the list of packages/modules made obsolete") - ] - display_option_names = [translate_longopt(x[0]) for x in display_options] - - # negative options are options that exclude other options - negative_opt = {'quiet': 'verbose'} - - # -- Creation/initialization methods ------------------------------- - - def __init__(self, attrs=None): - """Construct a new Distribution instance: initialize all the - attributes of a Distribution, and then use 'attrs' (a dictionary - mapping attribute names to values) to assign some of those - attributes their "real" values. (Any attributes not mentioned in - 'attrs' will be assigned to some null value: 0, None, an empty list - or dictionary, etc.) Most importantly, initialize the - 'command_obj' attribute to the empty dictionary; this will be - filled in with real command objects by 'parse_command_line()'. - """ - - # Default values for our command-line options - self.verbose = 1 - self.dry_run = 0 - self.help = 0 - for attr in self.display_option_names: - setattr(self, attr, 0) - - # Store the distribution meta-data (name, version, author, and so - # forth) in a separate object -- we're getting to have enough - # information here (and enough command-line options) that it's - # worth it. Also delegate 'get_XXX()' methods to the 'metadata' - # object in a sneaky and underhanded (but efficient!) way. - self.metadata = DistributionMetadata() - for basename in self.metadata._METHOD_BASENAMES: - method_name = "get_" + basename - setattr(self, method_name, getattr(self.metadata, method_name)) - - # 'cmdclass' maps command names to class objects, so we - # can 1) quickly figure out which class to instantiate when - # we need to create a new command object, and 2) have a way - # for the setup script to override command classes - self.cmdclass = {} - - # 'command_packages' is a list of packages in which commands - # are searched for. The factory for command 'foo' is expected - # to be named 'foo' in the module 'foo' in one of the packages - # named here. 
This list is searched from the left; an error - # is raised if no named package provides the command being - # searched for. (Always access using get_command_packages().) - self.command_packages = None - - # 'script_name' and 'script_args' are usually set to sys.argv[0] - # and sys.argv[1:], but they can be overridden when the caller is - # not necessarily a setup script run from the command-line. - self.script_name = None - self.script_args = None - - # 'command_options' is where we store command options between - # parsing them (from config files, the command-line, etc.) and when - # they are actually needed -- ie. when the command in question is - # instantiated. It is a dictionary of dictionaries of 2-tuples: - # command_options = { command_name : { option : (source, value) } } - self.command_options = {} - - # 'dist_files' is the list of (command, pyversion, file) that - # have been created by any dist commands run so far. This is - # filled regardless of whether the run is dry or not. pyversion - # gives sysconfig.get_python_version() if the dist file is - # specific to a Python version, 'any' if it is good for all - # Python versions on the target platform, and '' for a source - # file. pyversion should not be used to specify minimum or - # maximum required Python versions; use the metainfo for that - # instead. - self.dist_files = [] - - # These options are really the business of various commands, rather - # than of the Distribution itself. We provide aliases for them in - # Distribution as a convenience to the developer. - self.packages = None - self.package_data = {} - self.package_dir = None - self.py_modules = None - self.libraries = None - self.headers = None - self.ext_modules = None - self.ext_package = None - self.include_dirs = None - self.extra_path = None - self.scripts = None - self.data_files = None - self.password = '' - - # And now initialize bookkeeping stuff that can't be supplied by - # the caller at all. 'command_obj' maps command names to - # Command instances -- that's how we enforce that every command - # class is a singleton. - self.command_obj = {} - - # 'have_run' maps command names to boolean values; it keeps track - # of whether we have actually run a particular command, to make it - # cheap to "run" a command whenever we think we might need to -- if - # it's already been done, no need for expensive filesystem - # operations, we just check the 'have_run' dictionary and carry on. - # It's only safe to query 'have_run' for a command class that has - # been instantiated -- a false value will be inserted when the - # command object is created, and replaced with a true value when - # the command is successfully run. Thus it's probably best to use - # '.get()' rather than a straight lookup. - self.have_run = {} - - # Now we'll use the attrs dictionary (ultimately, keyword args from - # the setup script) to possibly override any or all of these - # distribution options. - - if attrs: - # Pull out the set of command options and work on them - # specifically. Note that this order guarantees that aliased - # command options will override any supplied redundantly - # through the general options dictionary. 
- options = attrs.get('options') - if options is not None: - del attrs['options'] - for (command, cmd_options) in options.items(): - opt_dict = self.get_option_dict(command) - for (opt, val) in cmd_options.items(): - opt_dict[opt] = ("setup script", val) - - if 'licence' in attrs: - attrs['license'] = attrs['licence'] - del attrs['licence'] - msg = "'licence' distribution option is deprecated; use 'license'" - if warnings is not None: - warnings.warn(msg) - else: - sys.stderr.write(msg + "\n") - - # Now work on the rest of the attributes. Any attribute that's - # not already defined is invalid! - for (key, val) in attrs.items(): - if hasattr(self.metadata, "set_" + key): - getattr(self.metadata, "set_" + key)(val) - elif hasattr(self.metadata, key): - setattr(self.metadata, key, val) - elif hasattr(self, key): - setattr(self, key, val) - else: - msg = "Unknown distribution option: %s" % repr(key) - warnings.warn(msg) - - # no-user-cfg is handled before other command line args - # because other args override the config files, and this - # one is needed before we can load the config files. - # If attrs['script_args'] wasn't passed, assume false. - # - # This also make sure we just look at the global options - self.want_user_cfg = True - - if self.script_args is not None: - for arg in self.script_args: - if not arg.startswith('-'): - break - if arg == '--no-user-cfg': - self.want_user_cfg = False - break - - self.finalize_options() - - def get_option_dict(self, command): - """Get the option dictionary for a given command. If that - command's option dictionary hasn't been created yet, then create it - and return the new dictionary; otherwise, return the existing - option dictionary. - """ - dict = self.command_options.get(command) - if dict is None: - dict = self.command_options[command] = {} - return dict - - def dump_option_dicts(self, header=None, commands=None, indent=""): - from pprint import pformat - - if commands is None: # dump all command option dicts - commands = sorted(self.command_options.keys()) - - if header is not None: - self.announce(indent + header) - indent = indent + " " - - if not commands: - self.announce(indent + "no commands known yet") - return - - for cmd_name in commands: - opt_dict = self.command_options.get(cmd_name) - if opt_dict is None: - self.announce(indent + - "no option dict for '%s' command" % cmd_name) - else: - self.announce(indent + - "option dict for '%s' command:" % cmd_name) - out = pformat(opt_dict) - for line in out.split('\n'): - self.announce(indent + " " + line) - - # -- Config file finding/parsing methods --------------------------- - - def find_config_files(self): - """Find as many configuration files as should be processed for this - platform, and return a list of filenames in the order in which they - should be parsed. The filenames returned are guaranteed to exist - (modulo nasty race conditions). - - There are three possible config files: distutils.cfg in the - Distutils installation directory (ie. where the top-level - Distutils __inst__.py file lives), a file in the user's home - directory named .pydistutils.cfg on Unix and pydistutils.cfg - on Windows/Mac; and setup.cfg in the current directory. - - The file in the user's home directory can be disabled with the - --no-user-cfg option. 
- """ - files = [] - check_environ() - - # Where to look for the system-wide Distutils config file - sys_dir = os.path.dirname(sys.modules['distutils'].__file__) - - # Look for the system config file - sys_file = os.path.join(sys_dir, "distutils.cfg") - if os.path.isfile(sys_file): - files.append(sys_file) - - # What to call the per-user config file - if os.name == 'posix': - user_filename = ".pydistutils.cfg" - else: - user_filename = "pydistutils.cfg" - - # And look for the user config file - if self.want_user_cfg: - user_file = os.path.join(os.path.expanduser('~'), user_filename) - if os.path.isfile(user_file): - files.append(user_file) - - # All platforms support local setup.cfg - local_file = "setup.cfg" - if os.path.isfile(local_file): - files.append(local_file) - - if DEBUG: - self.announce("using config files: %s" % ', '.join(files)) - - return files - - def parse_config_files(self, filenames=None): - from configparser import ConfigParser - - # Ignore install directory options if we have a venv - if sys.prefix != sys.base_prefix: - ignore_options = [ - 'install-base', 'install-platbase', 'install-lib', - 'install-platlib', 'install-purelib', 'install-headers', - 'install-scripts', 'install-data', 'prefix', 'exec-prefix', - 'home', 'user', 'root'] - else: - ignore_options = [] - - ignore_options = frozenset(ignore_options) - - if filenames is None: - filenames = self.find_config_files() - - if DEBUG: - self.announce("Distribution.parse_config_files():") - - parser = ConfigParser() - for filename in filenames: - if DEBUG: - self.announce(" reading %s" % filename) - parser.read(filename) - for section in parser.sections(): - options = parser.options(section) - opt_dict = self.get_option_dict(section) - - for opt in options: - if opt != '__name__' and opt not in ignore_options: - val = parser.get(section,opt) - opt = opt.replace('-', '_') - opt_dict[opt] = (filename, val) - - # Make the ConfigParser forget everything (so we retain - # the original filenames that options come from) - parser.__init__() - - # If there was a "global" section in the config file, use it - # to set Distribution options. - - if 'global' in self.command_options: - for (opt, (src, val)) in self.command_options['global'].items(): - alias = self.negative_opt.get(opt) - try: - if alias: - setattr(self, alias, not strtobool(val)) - elif opt in ('verbose', 'dry_run'): # ugh! - setattr(self, opt, strtobool(val)) - else: - setattr(self, opt, val) - except ValueError as msg: - raise DistutilsOptionError(msg) - - # -- Command-line parsing methods ---------------------------------- - - def parse_command_line(self): - """Parse the setup script's command line, taken from the - 'script_args' instance attribute (which defaults to 'sys.argv[1:]' - -- see 'setup()' in core.py). This list is first processed for - "global options" -- options that set attributes of the Distribution - instance. Then, it is alternately scanned for Distutils commands - and options for that command. Each new command terminates the - options for the previous command. The allowed options for a - command are determined by the 'user_options' attribute of the - command class -- thus, we have to be able to load command classes - in order to parse the command line. Any error in that 'options' - attribute raises DistutilsGetoptError; any error on the - command-line raises DistutilsArgError. If no Distutils commands - were found on the command line, raises DistutilsArgError. 
Return - true if command-line was successfully parsed and we should carry - on with executing commands; false if no errors but we shouldn't - execute commands (currently, this only happens if user asks for - help). - """ - # - # We now have enough information to show the Macintosh dialog - # that allows the user to interactively specify the "command line". - # - toplevel_options = self._get_toplevel_options() - - # We have to parse the command line a bit at a time -- global - # options, then the first command, then its options, and so on -- - # because each command will be handled by a different class, and - # the options that are valid for a particular class aren't known - # until we have loaded the command class, which doesn't happen - # until we know what the command is. - - self.commands = [] - parser = FancyGetopt(toplevel_options + self.display_options) - parser.set_negative_aliases(self.negative_opt) - parser.set_aliases({'licence': 'license'}) - args = parser.getopt(args=self.script_args, object=self) - option_order = parser.get_option_order() - log.set_verbosity(self.verbose) - - # for display options we return immediately - if self.handle_display_options(option_order): - return - while args: - args = self._parse_command_opts(parser, args) - if args is None: # user asked for help (and got it) - return - - # Handle the cases of --help as a "global" option, ie. - # "setup.py --help" and "setup.py --help command ...". For the - # former, we show global options (--verbose, --dry-run, etc.) - # and display-only options (--name, --version, etc.); for the - # latter, we omit the display-only options and show help for - # each command listed on the command line. - if self.help: - self._show_help(parser, - display_options=len(self.commands) == 0, - commands=self.commands) - return - - # Oops, no commands found -- an end-user error - if not self.commands: - raise DistutilsArgError("no commands supplied") - - # All is well: return true - return True - - def _get_toplevel_options(self): - """Return the non-display options recognized at the top level. - - This includes options that are recognized *only* at the top - level as well as options recognized for commands. - """ - return self.global_options + [ - ("command-packages=", None, - "list of packages that provide distutils commands"), - ] - - def _parse_command_opts(self, parser, args): - """Parse the command-line options for a single command. - 'parser' must be a FancyGetopt instance; 'args' must be the list - of arguments, starting with the current command (whose options - we are about to parse). Returns a new version of 'args' with - the next command at the front of the list; will be the empty - list if there are no more commands on the command line. Returns - None if the user asked for help on this command. - """ - # late import because of mutual dependence between these modules - from distutils.cmd import Command - - # Pull the current command from the head of the command line - command = args[0] - if not command_re.match(command): - raise SystemExit("invalid command name '%s'" % command) - self.commands.append(command) - - # Dig up the command class that implements this command, so we - # 1) know that it's a valid command, and 2) know which options - # it takes. - try: - cmd_class = self.get_command_class(command) - except DistutilsModuleError as msg: - raise DistutilsArgError(msg) - - # Require that the command class be derived from Command -- want - # to be sure that the basic "command" interface is implemented. 
- if not issubclass(cmd_class, Command): - raise DistutilsClassError( - "command class %s must subclass Command" % cmd_class) - - # Also make sure that the command object provides a list of its - # known options. - if not (hasattr(cmd_class, 'user_options') and - isinstance(cmd_class.user_options, list)): - msg = ("command class %s must provide " - "'user_options' attribute (a list of tuples)") - raise DistutilsClassError(msg % cmd_class) - - # If the command class has a list of negative alias options, - # merge it in with the global negative aliases. - negative_opt = self.negative_opt - if hasattr(cmd_class, 'negative_opt'): - negative_opt = negative_opt.copy() - negative_opt.update(cmd_class.negative_opt) - - # Check for help_options in command class. They have a different - # format (tuple of four) so we need to preprocess them here. - if (hasattr(cmd_class, 'help_options') and - isinstance(cmd_class.help_options, list)): - help_options = fix_help_options(cmd_class.help_options) - else: - help_options = [] - - # All commands support the global options too, just by adding - # in 'global_options'. - parser.set_option_table(self.global_options + - cmd_class.user_options + - help_options) - parser.set_negative_aliases(negative_opt) - (args, opts) = parser.getopt(args[1:]) - if hasattr(opts, 'help') and opts.help: - self._show_help(parser, display_options=0, commands=[cmd_class]) - return - - if (hasattr(cmd_class, 'help_options') and - isinstance(cmd_class.help_options, list)): - help_option_found=0 - for (help_option, short, desc, func) in cmd_class.help_options: - if hasattr(opts, parser.get_attr_name(help_option)): - help_option_found=1 - if callable(func): - func() - else: - raise DistutilsClassError( - "invalid help function %r for help option '%s': " - "must be a callable object (function, etc.)" - % (func, help_option)) - - if help_option_found: - return - - # Put the options from the command-line into their official - # holding pen, the 'command_options' dictionary. - opt_dict = self.get_option_dict(command) - for (name, value) in vars(opts).items(): - opt_dict[name] = ("command line", value) - - return args - - def finalize_options(self): - """Set final values for all the options on the Distribution - instance, analogous to the .finalize_options() method of Command - objects. - """ - for attr in ('keywords', 'platforms'): - value = getattr(self.metadata, attr) - if value is None: - continue - if isinstance(value, str): - value = [elm.strip() for elm in value.split(',')] - setattr(self.metadata, attr, value) - - def _show_help(self, parser, global_options=1, display_options=1, - commands=[]): - """Show help for the setup script command-line in the form of - several lists of command-line options. 'parser' should be a - FancyGetopt instance; do not expect it to be returned in the - same state, as its option table will be reset to make it - generate the correct help text. - - If 'global_options' is true, lists the global options: - --verbose, --dry-run, etc. If 'display_options' is true, lists - the "display-only" options: --name, --version, etc. Finally, - lists per-command help for every command name or command class - in 'commands'. 
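# A one-line illustration of the finalize_options() splitting above
# (Distribution.__init__ calls it, so the list is ready immediately):
from distutils.dist import Distribution

dist = Distribution({'name': 'demo', 'keywords': 'splunk, cisco, dnac'})
print(dist.metadata.keywords)    # ['splunk', 'cisco', 'dnac']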
- """ - # late import because of mutual dependence between these modules - from distutils.core import gen_usage - from distutils.cmd import Command - - if global_options: - if display_options: - options = self._get_toplevel_options() - else: - options = self.global_options - parser.set_option_table(options) - parser.print_help(self.common_usage + "\nGlobal options:") - print('') - - if display_options: - parser.set_option_table(self.display_options) - parser.print_help( - "Information display options (just display " + - "information, ignore any commands)") - print('') - - for command in self.commands: - if isinstance(command, type) and issubclass(command, Command): - klass = command - else: - klass = self.get_command_class(command) - if (hasattr(klass, 'help_options') and - isinstance(klass.help_options, list)): - parser.set_option_table(klass.user_options + - fix_help_options(klass.help_options)) - else: - parser.set_option_table(klass.user_options) - parser.print_help("Options for '%s' command:" % klass.__name__) - print('') - - print(gen_usage(self.script_name)) - - def handle_display_options(self, option_order): - """If there were any non-global "display-only" options - (--help-commands or the metadata display options) on the command - line, display the requested info and return true; else return - false. - """ - from distutils.core import gen_usage - - # User just wants a list of commands -- we'll print it out and stop - # processing now (ie. if they ran "setup --help-commands foo bar", - # we ignore "foo bar"). - if self.help_commands: - self.print_commands() - print('') - print(gen_usage(self.script_name)) - return 1 - - # If user supplied any of the "display metadata" options, then - # display that metadata in the order in which the user supplied the - # metadata options. - any_display_options = 0 - is_display_option = {} - for option in self.display_options: - is_display_option[option[0]] = 1 - - for (opt, val) in option_order: - if val and is_display_option.get(opt): - opt = translate_longopt(opt) - value = getattr(self.metadata, "get_"+opt)() - if opt in ['keywords', 'platforms']: - print(','.join(value)) - elif opt in ('classifiers', 'provides', 'requires', - 'obsoletes'): - print('\n'.join(value)) - else: - print(value) - any_display_options = 1 - - return any_display_options - - def print_command_list(self, commands, header, max_length): - """Print a subset of the list of all commands -- used by - 'print_commands()'. - """ - print(header + ":") - - for cmd in commands: - klass = self.cmdclass.get(cmd) - if not klass: - klass = self.get_command_class(cmd) - try: - description = klass.description - except AttributeError: - description = "(no description available)" - - print(" %-*s %s" % (max_length, cmd, description)) - - def print_commands(self): - """Print out a help message listing all available commands with a - description of each. The list is divided into "standard commands" - (listed in distutils.command.__all__) and "extra commands" - (mentioned in self.cmdclass, but not a standard command). The - descriptions come from the command class attribute - 'description'. 
- """ - import distutils.command - std_commands = distutils.command.__all__ - is_std = {} - for cmd in std_commands: - is_std[cmd] = 1 - - extra_commands = [] - for cmd in self.cmdclass.keys(): - if not is_std.get(cmd): - extra_commands.append(cmd) - - max_length = 0 - for cmd in (std_commands + extra_commands): - if len(cmd) > max_length: - max_length = len(cmd) - - self.print_command_list(std_commands, - "Standard commands", - max_length) - if extra_commands: - print() - self.print_command_list(extra_commands, - "Extra commands", - max_length) - - def get_command_list(self): - """Get a list of (command, description) tuples. - The list is divided into "standard commands" (listed in - distutils.command.__all__) and "extra commands" (mentioned in - self.cmdclass, but not a standard command). The descriptions come - from the command class attribute 'description'. - """ - # Currently this is only used on Mac OS, for the Mac-only GUI - # Distutils interface (by Jack Jansen) - import distutils.command - std_commands = distutils.command.__all__ - is_std = {} - for cmd in std_commands: - is_std[cmd] = 1 - - extra_commands = [] - for cmd in self.cmdclass.keys(): - if not is_std.get(cmd): - extra_commands.append(cmd) - - rv = [] - for cmd in (std_commands + extra_commands): - klass = self.cmdclass.get(cmd) - if not klass: - klass = self.get_command_class(cmd) - try: - description = klass.description - except AttributeError: - description = "(no description available)" - rv.append((cmd, description)) - return rv - - # -- Command class/object methods ---------------------------------- - - def get_command_packages(self): - """Return a list of packages from which commands are loaded.""" - pkgs = self.command_packages - if not isinstance(pkgs, list): - if pkgs is None: - pkgs = '' - pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != ''] - if "distutils.command" not in pkgs: - pkgs.insert(0, "distutils.command") - self.command_packages = pkgs - return pkgs - - def get_command_class(self, command): - """Return the class that implements the Distutils command named by - 'command'. First we check the 'cmdclass' dictionary; if the - command is mentioned there, we fetch the class object from the - dictionary and return it. Otherwise we load the command module - ("distutils.command." + command) and fetch the command class from - the module. The loaded class is also stored in 'cmdclass' - to speed future calls to 'get_command_class()'. - - Raises DistutilsModuleError if the expected module could not be - found, or if that module does not define the expected class. - """ - klass = self.cmdclass.get(command) - if klass: - return klass - - for pkgname in self.get_command_packages(): - module_name = "%s.%s" % (pkgname, command) - klass_name = command - - try: - __import__(module_name) - module = sys.modules[module_name] - except ImportError: - continue - - try: - klass = getattr(module, klass_name) - except AttributeError: - raise DistutilsModuleError( - "invalid command '%s' (no class '%s' in module '%s')" - % (command, klass_name, module_name)) - - self.cmdclass[command] = klass - return klass - - raise DistutilsModuleError("invalid command '%s'" % command) - - def get_command_obj(self, command, create=1): - """Return the command object for 'command'. Normally this object - is cached on a previous call to 'get_command_obj()'; if no command - object for 'command' is in the cache, then we either create and - return it (if 'create' is true) or return None. 
- """ - cmd_obj = self.command_obj.get(command) - if not cmd_obj and create: - if DEBUG: - self.announce("Distribution.get_command_obj(): " - "creating '%s' command object" % command) - - klass = self.get_command_class(command) - cmd_obj = self.command_obj[command] = klass(self) - self.have_run[command] = 0 - - # Set any options that were supplied in config files - # or on the command line. (NB. support for error - # reporting is lame here: any errors aren't reported - # until 'finalize_options()' is called, which means - # we won't report the source of the error.) - options = self.command_options.get(command) - if options: - self._set_command_options(cmd_obj, options) - - return cmd_obj - - def _set_command_options(self, command_obj, option_dict=None): - """Set the options for 'command_obj' from 'option_dict'. Basically - this means copying elements of a dictionary ('option_dict') to - attributes of an instance ('command'). - - 'command_obj' must be a Command instance. If 'option_dict' is not - supplied, uses the standard option dictionary for this command - (from 'self.command_options'). - """ - command_name = command_obj.get_command_name() - if option_dict is None: - option_dict = self.get_option_dict(command_name) - - if DEBUG: - self.announce(" setting options for '%s' command:" % command_name) - for (option, (source, value)) in option_dict.items(): - if DEBUG: - self.announce(" %s = %s (from %s)" % (option, value, - source)) - try: - bool_opts = [translate_longopt(o) - for o in command_obj.boolean_options] - except AttributeError: - bool_opts = [] - try: - neg_opt = command_obj.negative_opt - except AttributeError: - neg_opt = {} - - try: - is_string = isinstance(value, str) - if option in neg_opt and is_string: - setattr(command_obj, neg_opt[option], not strtobool(value)) - elif option in bool_opts and is_string: - setattr(command_obj, option, strtobool(value)) - elif hasattr(command_obj, option): - setattr(command_obj, option, value) - else: - raise DistutilsOptionError( - "error in %s: command '%s' has no such option '%s'" - % (source, command_name, option)) - except ValueError as msg: - raise DistutilsOptionError(msg) - - def reinitialize_command(self, command, reinit_subcommands=0): - """Reinitializes a command to the state it was in when first - returned by 'get_command_obj()': ie., initialized but not yet - finalized. This provides the opportunity to sneak option - values in programmatically, overriding or supplementing - user-supplied values from the config files and command line. - You'll have to re-finalize the command object (by calling - 'finalize_options()' or 'ensure_finalized()') before using it for - real. - - 'command' should be a command name (string) or command object. If - 'reinit_subcommands' is true, also reinitializes the command's - sub-commands, as declared by the 'sub_commands' class attribute (if - it has one). See the "install" command for an example. Only - reinitializes the sub-commands that actually matter, ie. those - whose test predicates return true. - - Returns the reinitialized command object. 
- """ - from distutils.cmd import Command - if not isinstance(command, Command): - command_name = command - command = self.get_command_obj(command_name) - else: - command_name = command.get_command_name() - - if not command.finalized: - return command - command.initialize_options() - command.finalized = 0 - self.have_run[command_name] = 0 - self._set_command_options(command) - - if reinit_subcommands: - for sub in command.get_sub_commands(): - self.reinitialize_command(sub, reinit_subcommands) - - return command - - # -- Methods that operate on the Distribution ---------------------- - - def announce(self, msg, level=log.INFO): - log.log(level, msg) - - def run_commands(self): - """Run each command that was seen on the setup script command line. - Uses the list of commands found and cache of command objects - created by 'get_command_obj()'. - """ - for cmd in self.commands: - self.run_command(cmd) - - # -- Methods that operate on its Commands -------------------------- - - def run_command(self, command): - """Do whatever it takes to run a command (including nothing at all, - if the command has already been run). Specifically: if we have - already created and run the command named by 'command', return - silently without doing anything. If the command named by 'command' - doesn't even have a command object yet, create one. Then invoke - 'run()' on that command object (or an existing one). - """ - # Already been here, done that? then return silently. - if self.have_run.get(command): - return - - log.info("running %s", command) - cmd_obj = self.get_command_obj(command) - cmd_obj.ensure_finalized() - cmd_obj.run() - self.have_run[command] = 1 - - # -- Distribution query methods ------------------------------------ - - def has_pure_modules(self): - return len(self.packages or self.py_modules or []) > 0 - - def has_ext_modules(self): - return self.ext_modules and len(self.ext_modules) > 0 - - def has_c_libraries(self): - return self.libraries and len(self.libraries) > 0 - - def has_modules(self): - return self.has_pure_modules() or self.has_ext_modules() - - def has_headers(self): - return self.headers and len(self.headers) > 0 - - def has_scripts(self): - return self.scripts and len(self.scripts) > 0 - - def has_data_files(self): - return self.data_files and len(self.data_files) > 0 - - def is_pure(self): - return (self.has_pure_modules() and - not self.has_ext_modules() and - not self.has_c_libraries()) - - # -- Metadata query methods ---------------------------------------- - - # If you're looking for 'get_name()', 'get_version()', and so forth, - # they are defined in a sneaky way: the constructor binds self.get_XXX - # to self.metadata.get_XXX. The actual code is in the - # DistributionMetadata class, below. - -class DistributionMetadata: - """Dummy class to hold the distribution meta-data: name, version, - author, and so forth. 
- """ - - _METHOD_BASENAMES = ("name", "version", "author", "author_email", - "maintainer", "maintainer_email", "url", - "license", "description", "long_description", - "keywords", "platforms", "fullname", "contact", - "contact_email", "classifiers", "download_url", - # PEP 314 - "provides", "requires", "obsoletes", - ) - - def __init__(self, path=None): - if path is not None: - self.read_pkg_file(open(path)) - else: - self.name = None - self.version = None - self.author = None - self.author_email = None - self.maintainer = None - self.maintainer_email = None - self.url = None - self.license = None - self.description = None - self.long_description = None - self.keywords = None - self.platforms = None - self.classifiers = None - self.download_url = None - # PEP 314 - self.provides = None - self.requires = None - self.obsoletes = None - - def read_pkg_file(self, file): - """Reads the metadata values from a file object.""" - msg = message_from_file(file) - - def _read_field(name): - value = msg[name] - if value == 'UNKNOWN': - return None - return value - - def _read_list(name): - values = msg.get_all(name, None) - if values == []: - return None - return values - - metadata_version = msg['metadata-version'] - self.name = _read_field('name') - self.version = _read_field('version') - self.description = _read_field('summary') - # we are filling author only. - self.author = _read_field('author') - self.maintainer = None - self.author_email = _read_field('author-email') - self.maintainer_email = None - self.url = _read_field('home-page') - self.license = _read_field('license') - - if 'download-url' in msg: - self.download_url = _read_field('download-url') - else: - self.download_url = None - - self.long_description = _read_field('description') - self.description = _read_field('summary') - - if 'keywords' in msg: - self.keywords = _read_field('keywords').split(',') - - self.platforms = _read_list('platform') - self.classifiers = _read_list('classifier') - - # PEP 314 - these fields only exist in 1.1 - if metadata_version == '1.1': - self.requires = _read_list('requires') - self.provides = _read_list('provides') - self.obsoletes = _read_list('obsoletes') - else: - self.requires = None - self.provides = None - self.obsoletes = None - - def write_pkg_info(self, base_dir): - """Write the PKG-INFO file into the release tree. - """ - with open(os.path.join(base_dir, 'PKG-INFO'), 'w', - encoding='UTF-8') as pkg_info: - self.write_pkg_file(pkg_info) - - def write_pkg_file(self, file): - """Write the PKG-INFO format data to a file object. 
- """ - version = '1.0' - if (self.provides or self.requires or self.obsoletes or - self.classifiers or self.download_url): - version = '1.1' - - file.write('Metadata-Version: %s\n' % version) - file.write('Name: %s\n' % self.get_name()) - file.write('Version: %s\n' % self.get_version()) - file.write('Summary: %s\n' % self.get_description()) - file.write('Home-page: %s\n' % self.get_url()) - file.write('Author: %s\n' % self.get_contact()) - file.write('Author-email: %s\n' % self.get_contact_email()) - file.write('License: %s\n' % self.get_license()) - if self.download_url: - file.write('Download-URL: %s\n' % self.download_url) - - long_desc = rfc822_escape(self.get_long_description()) - file.write('Description: %s\n' % long_desc) - - keywords = ','.join(self.get_keywords()) - if keywords: - file.write('Keywords: %s\n' % keywords) - - self._write_list(file, 'Platform', self.get_platforms()) - self._write_list(file, 'Classifier', self.get_classifiers()) - - # PEP 314 - self._write_list(file, 'Requires', self.get_requires()) - self._write_list(file, 'Provides', self.get_provides()) - self._write_list(file, 'Obsoletes', self.get_obsoletes()) - - def _write_list(self, file, name, values): - for value in values: - file.write('%s: %s\n' % (name, value)) - - # -- Metadata query methods ---------------------------------------- - - def get_name(self): - return self.name or "UNKNOWN" - - def get_version(self): - return self.version or "0.0.0" - - def get_fullname(self): - return "%s-%s" % (self.get_name(), self.get_version()) - - def get_author(self): - return self.author or "UNKNOWN" - - def get_author_email(self): - return self.author_email or "UNKNOWN" - - def get_maintainer(self): - return self.maintainer or "UNKNOWN" - - def get_maintainer_email(self): - return self.maintainer_email or "UNKNOWN" - - def get_contact(self): - return self.maintainer or self.author or "UNKNOWN" - - def get_contact_email(self): - return self.maintainer_email or self.author_email or "UNKNOWN" - - def get_url(self): - return self.url or "UNKNOWN" - - def get_license(self): - return self.license or "UNKNOWN" - get_licence = get_license - - def get_description(self): - return self.description or "UNKNOWN" - - def get_long_description(self): - return self.long_description or "UNKNOWN" - - def get_keywords(self): - return self.keywords or [] - - def set_keywords(self, value): - self.keywords = _ensure_list(value, 'keywords') - - def get_platforms(self): - return self.platforms or ["UNKNOWN"] - - def set_platforms(self, value): - self.platforms = _ensure_list(value, 'platforms') - - def get_classifiers(self): - return self.classifiers or [] - - def set_classifiers(self, value): - self.classifiers = _ensure_list(value, 'classifiers') - - def get_download_url(self): - return self.download_url or "UNKNOWN" - - # PEP 314 - def get_requires(self): - return self.requires or [] - - def set_requires(self, value): - import distutils.versionpredicate - for v in value: - distutils.versionpredicate.VersionPredicate(v) - self.requires = list(value) - - def get_provides(self): - return self.provides or [] - - def set_provides(self, value): - value = [v.strip() for v in value] - for v in value: - import distutils.versionpredicate - distutils.versionpredicate.split_provision(v) - self.provides = value - - def get_obsoletes(self): - return self.obsoletes or [] - - def set_obsoletes(self, value): - import distutils.versionpredicate - for v in value: - distutils.versionpredicate.VersionPredicate(v) - self.obsoletes = list(value) - -def 
fix_help_options(options):
-    """Convert a 4-tuple 'help_options' list as found in various command
-    classes to the 3-tuple form required by FancyGetopt.
-    """
-    new_options = []
-    for help_tuple in options:
-        new_options.append(help_tuple[0:3])
-    return new_options
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/errors.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/errors.py
deleted file mode 100755
index 8b93059..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/errors.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""distutils.errors
-
-Provides exceptions used by the Distutils modules.  Note that Distutils
-modules may raise standard exceptions; in particular, SystemExit is
-usually raised for errors that are obviously the end-user's fault
-(eg. bad command-line arguments).
-
-This module is safe to use in "from ... import *" mode; it only exports
-symbols whose names start with "Distutils" and end with "Error"."""
-
-class DistutilsError (Exception):
-    """The root of all Distutils evil."""
-    pass
-
-class DistutilsModuleError (DistutilsError):
-    """Unable to load an expected module, or to find an expected class
-    within some module (in particular, command modules and classes)."""
-    pass
-
-class DistutilsClassError (DistutilsError):
-    """Some command class (or possibly distribution class, if anyone
-    feels a need to subclass Distribution) is found not to be holding
-    up its end of the bargain, ie. implementing some part of the
-    "command" interface."""
-    pass
-
-class DistutilsGetoptError (DistutilsError):
-    """The option table provided to 'fancy_getopt()' is bogus."""
-    pass
-
-class DistutilsArgError (DistutilsError):
-    """Raised by fancy_getopt in response to getopt.error -- ie. an
-    error in the command line usage."""
-    pass
-
-class DistutilsFileError (DistutilsError):
-    """Any problems in the filesystem: expected file not found, etc.
-    Typically this is for problems that we detect before OSError
-    could be raised."""
-    pass
-
-class DistutilsOptionError (DistutilsError):
-    """Syntactic/semantic errors in command options, such as use of
-    mutually conflicting options, or inconsistent options,
-    badly-spelled values, etc.  No distinction is made between option
-    values originating in the setup script, the command line, config
-    files, or what-have-you -- but if we *know* something originated in
-    the setup script, we'll raise DistutilsSetupError instead."""
-    pass
-
-class DistutilsSetupError (DistutilsError):
-    """For errors that can be definitely blamed on the setup script,
-    such as invalid keyword arguments to 'setup()'."""
-    pass
-
-class DistutilsPlatformError (DistutilsError):
-    """We don't know how to do something on the current platform (but
-    we do know how to do it on some platform) -- eg.
trying to compile - C files on a platform not supported by a CCompiler subclass.""" - pass - -class DistutilsExecError (DistutilsError): - """Any problems executing an external program (such as the C - compiler, when compiling C files).""" - pass - -class DistutilsInternalError (DistutilsError): - """Internal inconsistencies or impossibilities (obviously, this - should never be seen if the code is working!).""" - pass - -class DistutilsTemplateError (DistutilsError): - """Syntax error in a file list template.""" - -class DistutilsByteCompileError(DistutilsError): - """Byte compile error.""" - -# Exception classes used by the CCompiler implementation classes -class CCompilerError (Exception): - """Some compile/link operation failed.""" - -class PreprocessError (CCompilerError): - """Failure to preprocess one or more C/C++ files.""" - -class CompileError (CCompilerError): - """Failure to compile one or more C/C++ source files.""" - -class LibError (CCompilerError): - """Failure to create a static library from one or more C/C++ object - files.""" - -class LinkError (CCompilerError): - """Failure to link one or more C/C++ object files into an executable - or shared library file.""" - -class UnknownFileError (CCompilerError): - """Attempt to process an unknown file type.""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/extension.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/extension.py deleted file mode 100755 index c507da3..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/extension.py +++ /dev/null @@ -1,240 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts.""" - -import os -import warnings - -# This class is really only used by the "build_ext" command, so it might -# make sense to put it in distutils.command.build_ext. However, that -# module is already big enough, and I want to make this class a bit more -# complex to simplify some common cases ("foo" module in "foo.c") and do -# better error-checking ("foo.c" actually exists). -# -# Also, putting this in build_ext.py means every setup script would have to -# import that large-ish module (indirectly, through distutils.core) in -# order to do anything. - -class Extension: - """Just a collection of attributes that describes an extension - module and everything needed to build it (hopefully in a portable - way, but there are hooks that let you be as unportable as you need). - - Instance attributes: - name : string - the full name of the extension, including any packages -- ie. - *not* a filename or pathname, but Python dotted name - sources : [string] - list of source filenames, relative to the distribution root - (where the setup script lives), in Unix form (slash-separated) - for portability. Source files may be C, C++, SWIG (.i), - platform-specific resource files, or whatever else is recognized - by the "build_ext" command as source for a Python extension. 
- include_dirs : [string] - list of directories to search for C/C++ header files (in Unix - form for portability) - define_macros : [(name : string, value : string|None)] - list of macros to define; each macro is defined using a 2-tuple, - where 'value' is either the string to define it to or None to - define it without a particular value (equivalent of "#define - FOO" in source or -DFOO on Unix C compiler command line) - undef_macros : [string] - list of macros to undefine explicitly - library_dirs : [string] - list of directories to search for C/C++ libraries at link time - libraries : [string] - list of library names (not filenames or paths) to link against - runtime_library_dirs : [string] - list of directories to search for C/C++ libraries at run time - (for shared extensions, this is when the extension is loaded) - extra_objects : [string] - list of extra files to link with (eg. object files not implied - by 'sources', static library that must be explicitly specified, - binary resource files, etc.) - extra_compile_args : [string] - any extra platform- and compiler-specific information to use - when compiling the source files in 'sources'. For platforms and - compilers where "command line" makes sense, this is typically a - list of command-line arguments, but for other platforms it could - be anything. - extra_link_args : [string] - any extra platform- and compiler-specific information to use - when linking object files together to create the extension (or - to create a new static Python interpreter). Similar - interpretation as for 'extra_compile_args'. - export_symbols : [string] - list of symbols to be exported from a shared extension. Not - used on all platforms, and not generally necessary for Python - extensions, which typically export exactly one symbol: "init" + - extension_name. - swig_opts : [string] - any extra options to pass to SWIG if a source file has the .i - extension. - depends : [string] - list of files that the extension depends on - language : string - extension language (i.e. "c", "c++", "objc"). Will be detected - from the source extensions if not provided. - optional : boolean - specifies that a build failure in the extension should not abort the - build process, but simply not install the failing extension. - """ - - # When adding arguments to this constructor, be sure to update - # setup_keywords in core.py. 
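(Not part of the patch: the attribute list above maps one-to-one onto the constructor keywords that follow. A minimal sketch of a setup script using the class; the module and file names are hypothetical, and distutils itself is deprecated in current Python.)

    from distutils.core import setup
    from distutils.extension import Extension

    # Hypothetical module "foo" built from two C sources, with one macro
    # defined (-DWITH_BAR=1), one undefined (-UNDEBUG), linked against -lm.
    foo = Extension(
        "foo",
        sources=["foo.c", "foohelpers.c"],
        define_macros=[("WITH_BAR", "1")],
        undef_macros=["NDEBUG"],
        libraries=["m"],
        optional=True,  # a failed build skips this extension instead of aborting
    )

    setup(name="foo", version="1.0", ext_modules=[foo])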
-    def __init__(self, name, sources,
-                  include_dirs=None,
-                  define_macros=None,
-                  undef_macros=None,
-                  library_dirs=None,
-                  libraries=None,
-                  runtime_library_dirs=None,
-                  extra_objects=None,
-                  extra_compile_args=None,
-                  extra_link_args=None,
-                  export_symbols=None,
-                  swig_opts = None,
-                  depends=None,
-                  language=None,
-                  optional=None,
-                  **kw                      # To catch unknown keywords
-                 ):
-        if not isinstance(name, str):
-            raise AssertionError("'name' must be a string")
-        if not (isinstance(sources, list) and
-                all(isinstance(v, str) for v in sources)):
-            raise AssertionError("'sources' must be a list of strings")
-
-        self.name = name
-        self.sources = sources
-        self.include_dirs = include_dirs or []
-        self.define_macros = define_macros or []
-        self.undef_macros = undef_macros or []
-        self.library_dirs = library_dirs or []
-        self.libraries = libraries or []
-        self.runtime_library_dirs = runtime_library_dirs or []
-        self.extra_objects = extra_objects or []
-        self.extra_compile_args = extra_compile_args or []
-        self.extra_link_args = extra_link_args or []
-        self.export_symbols = export_symbols or []
-        self.swig_opts = swig_opts or []
-        self.depends = depends or []
-        self.language = language
-        self.optional = optional
-
-        # If there are unknown keyword options, warn about them
-        if len(kw) > 0:
-            options = [repr(option) for option in kw]
-            options = ', '.join(sorted(options))
-            msg = "Unknown Extension options: %s" % options
-            warnings.warn(msg)
-
-    def __repr__(self):
-        return '<%s.%s(%r) at %#x>' % (
-            self.__class__.__module__,
-            self.__class__.__qualname__,
-            self.name,
-            id(self))
-
-
-def read_setup_file(filename):
-    """Reads a Setup file and returns Extension instances."""
-    from distutils.sysconfig import (parse_makefile, expand_makefile_vars,
-                                     _variable_rx)
-
-    from distutils.text_file import TextFile
-    from distutils.util import split_quoted
-
-    # First pass over the file to gather "VAR = VALUE" assignments.
-    vars = parse_makefile(filename)
-
-    # Second pass to gobble up the real content: lines of the form
-    #   <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
-    file = TextFile(filename,
-                    strip_comments=1, skip_blanks=1, join_lines=1,
-                    lstrip_ws=1, rstrip_ws=1)
-    try:
-        extensions = []
-
-        while True:
-            line = file.readline()
-            if line is None:                # eof
-                break
-            if _variable_rx.match(line):    # VAR=VALUE, handled in first pass
-                continue
-
-            if line[0] == line[-1] == "*":
-                file.warn("'%s' lines not handled yet" % line)
-                continue
-
-            line = expand_makefile_vars(line, vars)
-            words = split_quoted(line)
-
-            # NB. this parses a slightly different syntax than the old
-            # makesetup script: here, there must be exactly one extension per
-            # line, and it must be the first word of the line.  I have no idea
-            # why the old syntax supported multiple extensions per line, as
-            # they all wind up being the same.
-
-            module = words[0]
-            ext = Extension(module, [])
-            append_next_word = None
-
-            for word in words[1:]:
-                if append_next_word is not None:
-                    append_next_word.append(word)
-                    append_next_word = None
-                    continue
-
-                suffix = os.path.splitext(word)[1]
-                switch = word[0:2] ; value = word[2:]
-
-                if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
-                    # hmm, should we do something about C vs. C++ sources?
-                    # or leave it up to the CCompiler implementation to
-                    # worry about?
-                    ext.sources.append(word)
-                elif switch == "-I":
-                    ext.include_dirs.append(value)
-                elif switch == "-D":
-                    equals = value.find("=")
-                    if equals == -1:        # bare "-DFOO" -- no value
-                        ext.define_macros.append((value, None))
-                    else:                   # "-DFOO=blah"
-                        ext.define_macros.append((value[0:equals],
-                                                  value[equals+1:]))
-                elif switch == "-U":
-                    ext.undef_macros.append(value)
-                elif switch == "-C":        # only here 'cause makesetup has it!
-                    ext.extra_compile_args.append(word)
-                elif switch == "-l":
-                    ext.libraries.append(value)
-                elif switch == "-L":
-                    ext.library_dirs.append(value)
-                elif switch == "-R":
-                    ext.runtime_library_dirs.append(value)
-                elif word == "-rpath":
-                    append_next_word = ext.runtime_library_dirs
-                elif word == "-Xlinker":
-                    append_next_word = ext.extra_link_args
-                elif word == "-Xcompiler":
-                    append_next_word = ext.extra_compile_args
-                elif switch == "-u":
-                    ext.extra_link_args.append(word)
-                    if not value:
-                        append_next_word = ext.extra_link_args
-                elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
-                    # NB. a really faithful emulation of makesetup would
-                    # append a .o file to extra_objects only if it
-                    # had a slash in it; otherwise, it would s/.o/.c/
-                    # and append it to sources.  Hmmmm.
-                    ext.extra_objects.append(word)
-                else:
-                    file.warn("unrecognized argument '%s'" % word)
-
-            extensions.append(ext)
-    finally:
-        file.close()
-
-    return extensions
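(Not part of the patch: read_setup_file() above parses the old makesetup "Setup" syntax, one extension per line. A condensed, self-contained sketch of the same switch handling for a single line; parse_setup_line is an illustrative name, and shlex.split stands in for distutils.util.split_quoted.)

    import os
    import shlex

    def parse_setup_line(line):
        # e.g. "foo foomodule.c -DWITH_BAR=1 -Iinclude -lm"
        words = shlex.split(line)
        ext = {"name": words[0], "sources": [], "define_macros": [],
               "include_dirs": [], "libraries": []}
        for word in words[1:]:
            switch, value = word[:2], word[2:]
            if os.path.splitext(word)[1] in (".c", ".cc", ".cpp"):
                ext["sources"].append(word)
            elif switch == "-D":
                name, _, val = value.partition("=")  # "-DFOO" -> (FOO, None)
                ext["define_macros"].append((name, val or None))
            elif switch == "-I":
                ext["include_dirs"].append(value)
            elif switch == "-l":
                ext["libraries"].append(value)
        return ext

    print(parse_setup_line("foo foomodule.c -DWITH_BAR=1 -Iinclude -lm"))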
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/fancy_getopt.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/fancy_getopt.py
deleted file mode 100755
index 7d170dd..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/fancy_getopt.py
+++ /dev/null
@@ -1,457 +0,0 @@
-"""distutils.fancy_getopt
-
-Wrapper around the standard getopt module that provides the following
-additional features:
-  * short and long options are tied together
-  * options have help strings, so fancy_getopt could potentially
-    create a complete usage summary
-  * options set attributes of a passed-in object
-"""
-
-import sys, string, re
-import getopt
-from distutils.errors import *
-
-# Much like command_re in distutils.core, this is close to but not quite
-# the same as a Python NAME -- except, in the spirit of most GNU
-# utilities, we use '-' in place of '_'.  (The spirit of LISP lives on!)
-# The similarities to NAME are again not a coincidence...
-longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
-longopt_re = re.compile(r'^%s$' % longopt_pat)
-
-# For recognizing "negative alias" options, eg. "quiet=!verbose"
-neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
-
-# This is used to translate long options to legitimate Python identifiers
-# (for use as attributes of some object).
-longopt_xlate = str.maketrans('-', '_')
-
-class FancyGetopt:
-    """Wrapper around the standard 'getopt()' module that provides some
-    handy extra functionality:
-      * short and long options are tied together
-      * options have help strings, and help text can be assembled
-        from them
-      * options set attributes of a passed-in object
-      * boolean options can have "negative aliases" -- eg. if
-        --quiet is the "negative alias" of --verbose, then "--quiet"
-        on the command line sets 'verbose' to false
-    """
-
-    def __init__(self, option_table=None):
-        # The option table is (currently) a list of tuples.  The
-        # tuples may have three or four values:
-        #   (long_option, short_option, help_string [, repeatable])
-        # if an option takes an argument, its long_option should have '='
-        # appended; short_option should just be a single character, no ':'
-        # in any case.  If a long_option doesn't have a corresponding
-        # short_option, short_option should be None.  All option tuples
-        # must have long options.
-        self.option_table = option_table
-
-        # 'option_index' maps long option names to entries in the option
-        # table (ie. those 3-tuples).
-        self.option_index = {}
-        if self.option_table:
-            self._build_index()
-
-        # 'alias' records (duh) alias options; {'foo': 'bar'} means
-        # --foo is an alias for --bar
-        self.alias = {}
-
-        # 'negative_alias' keeps track of options that are the boolean
-        # opposite of some other option
-        self.negative_alias = {}
-
-        # These keep track of the information in the option table.  We
-        # don't actually populate these structures until we're ready to
-        # parse the command-line, since the 'option_table' passed in here
-        # isn't necessarily the final word.
-        self.short_opts = []
-        self.long_opts = []
-        self.short2long = {}
-        self.attr_name = {}
-        self.takes_arg = {}
-
-        # And 'option_order' is filled up in 'getopt()'; it records the
-        # original order of options (and their values) on the command-line,
-        # but expands short options, converts aliases, etc.
-        self.option_order = []
-
-    def _build_index(self):
-        self.option_index.clear()
-        for option in self.option_table:
-            self.option_index[option[0]] = option
-
-    def set_option_table(self, option_table):
-        self.option_table = option_table
-        self._build_index()
-
-    def add_option(self, long_option, short_option=None, help_string=None):
-        if long_option in self.option_index:
-            raise DistutilsGetoptError(
-                  "option conflict: already an option '%s'" % long_option)
-        else:
-            option = (long_option, short_option, help_string)
-            self.option_table.append(option)
-            self.option_index[long_option] = option
-
-    def has_option(self, long_option):
-        """Return true if the option table for this parser has an
-        option with long name 'long_option'."""
-        return long_option in self.option_index
-
-    def get_attr_name(self, long_option):
-        """Translate long option name 'long_option' to the form it
-        has as an attribute of some object: ie., translate hyphens
-        to underscores."""
-        return long_option.translate(longopt_xlate)
-
-    def _check_alias_dict(self, aliases, what):
-        assert isinstance(aliases, dict)
-        for (alias, opt) in aliases.items():
-            if alias not in self.option_index:
-                raise DistutilsGetoptError(("invalid %s '%s': "
-                      "option '%s' not defined") % (what, alias, alias))
-            if opt not in self.option_index:
-                raise DistutilsGetoptError(("invalid %s '%s': "
-                      "aliased option '%s' not defined") % (what, alias, opt))
-
-    def set_aliases(self, alias):
-        """Set the aliases for this option parser."""
-        self._check_alias_dict(alias, "alias")
-        self.alias = alias
-
-    def set_negative_aliases(self, negative_alias):
-        """Set the negative aliases for this option parser.
-        'negative_alias' should be a dictionary mapping option names to
-        option names; both the key and value must already be defined
-        in the option table."""
-        self._check_alias_dict(negative_alias, "negative alias")
-        self.negative_alias = negative_alias
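(Not part of the patch: how the structures above fit together. Option tuples are (long, short, help[, repeatable]); a long name ending in '=' takes a value. The option names here are made up, and this runs only on a Python that still ships distutils.)

    from distutils.fancy_getopt import FancyGetopt

    options = [
        ('verbose', 'v', "run verbosely", 1),          # 4-tuple: repeatable
        ('quiet', 'q', "run quietly"),
        ('build-dir=', 'b', "directory to build in"),  # '=': takes a value
    ]
    parser = FancyGetopt(options)
    parser.set_negative_aliases({'quiet': 'verbose'})  # --quiet sets verbose=0

    args, opts = parser.getopt(['-v', '--build-dir', 'out', 'target'])
    print(args)            # ['target']
    print(opts.verbose)    # 1
    print(opts.build_dir)  # 'out' (hyphen translated to underscore)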
- """ - self.long_opts = [] - self.short_opts = [] - self.short2long.clear() - self.repeat = {} - - for option in self.option_table: - if len(option) == 3: - long, short, help = option - repeat = 0 - elif len(option) == 4: - long, short, help, repeat = option - else: - # the option table is part of the code, so simply - # assert that it is correct - raise ValueError("invalid option tuple: %r" % (option,)) - - # Type- and value-check the option names - if not isinstance(long, str) or len(long) < 2: - raise DistutilsGetoptError(("invalid long option '%s': " - "must be a string of length >= 2") % long) - - if (not ((short is None) or - (isinstance(short, str) and len(short) == 1))): - raise DistutilsGetoptError("invalid short option '%s': " - "must a single character or None" % short) - - self.repeat[long] = repeat - self.long_opts.append(long) - - if long[-1] == '=': # option takes an argument? - if short: short = short + ':' - long = long[0:-1] - self.takes_arg[long] = 1 - else: - # Is option is a "negative alias" for some other option (eg. - # "quiet" == "!verbose")? - alias_to = self.negative_alias.get(long) - if alias_to is not None: - if self.takes_arg[alias_to]: - raise DistutilsGetoptError( - "invalid negative alias '%s': " - "aliased option '%s' takes a value" - % (long, alias_to)) - - self.long_opts[-1] = long # XXX redundant?! - self.takes_arg[long] = 0 - - # If this is an alias option, make sure its "takes arg" flag is - # the same as the option it's aliased to. - alias_to = self.alias.get(long) - if alias_to is not None: - if self.takes_arg[long] != self.takes_arg[alias_to]: - raise DistutilsGetoptError( - "invalid alias '%s': inconsistent with " - "aliased option '%s' (one of them takes a value, " - "the other doesn't" - % (long, alias_to)) - - # Now enforce some bondage on the long option name, so we can - # later translate it to an attribute name on some object. Have - # to do this a bit late to make sure we've removed any trailing - # '='. - if not longopt_re.match(long): - raise DistutilsGetoptError( - "invalid long option name '%s' " - "(must be letters, numbers, hyphens only" % long) - - self.attr_name[long] = self.get_attr_name(long) - if short: - self.short_opts.append(short) - self.short2long[short[0]] = long - - def getopt(self, args=None, object=None): - """Parse command-line options in args. Store as attributes on object. - - If 'args' is None or not supplied, uses 'sys.argv[1:]'. If - 'object' is None or not supplied, creates a new OptionDummy - object, stores option values there, and returns a tuple (args, - object). If 'object' is supplied, it is modified in place and - 'getopt()' just returns 'args'; in both cases, the returned - 'args' is a modified copy of the passed-in 'args' list, which - is left untouched. - """ - if args is None: - args = sys.argv[1:] - if object is None: - object = OptionDummy() - created_object = True - else: - created_object = False - - self._grok_option_table() - - short_opts = ' '.join(self.short_opts) - try: - opts, args = getopt.getopt(args, short_opts, self.long_opts) - except getopt.error as msg: - raise DistutilsArgError(msg) - - for opt, val in opts: - if len(opt) == 2 and opt[0] == '-': # it's a short option - opt = self.short2long[opt[1]] - else: - assert len(opt) > 2 and opt[:2] == '--' - opt = opt[2:] - - alias = self.alias.get(opt) - if alias: - opt = alias - - if not self.takes_arg[opt]: # boolean option? 
- assert val == '', "boolean option can't have value" - alias = self.negative_alias.get(opt) - if alias: - opt = alias - val = 0 - else: - val = 1 - - attr = self.attr_name[opt] - # The only repeating option at the moment is 'verbose'. - # It has a negative option -q quiet, which should set verbose = 0. - if val and self.repeat.get(attr) is not None: - val = getattr(object, attr, 0) + 1 - setattr(object, attr, val) - self.option_order.append((opt, val)) - - # for opts - if created_object: - return args, object - else: - return args - - def get_option_order(self): - """Returns the list of (option, value) tuples processed by the - previous run of 'getopt()'. Raises RuntimeError if - 'getopt()' hasn't been called yet. - """ - if self.option_order is None: - raise RuntimeError("'getopt()' hasn't been called yet") - else: - return self.option_order - - def generate_help(self, header=None): - """Generate help text (a list of strings, one per suggested line of - output) from the option table for this FancyGetopt object. - """ - # Blithely assume the option table is good: probably wouldn't call - # 'generate_help()' unless you've already called 'getopt()'. - - # First pass: determine maximum length of long option names - max_opt = 0 - for option in self.option_table: - long = option[0] - short = option[1] - l = len(long) - if long[-1] == '=': - l = l - 1 - if short is not None: - l = l + 5 # " (-x)" where short == 'x' - if l > max_opt: - max_opt = l - - opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter - - # Typical help block looks like this: - # --foo controls foonabulation - # Help block for longest option looks like this: - # --flimflam set the flim-flam level - # and with wrapped text: - # --flimflam set the flim-flam level (must be between - # 0 and 100, except on Tuesdays) - # Options with short names will have the short name shown (but - # it doesn't contribute to max_opt): - # --foo (-f) controls foonabulation - # If adding the short option would make the left column too wide, - # we push the explanation off to the next line - # --flimflam (-l) - # set the flim-flam level - # Important parameters: - # - 2 spaces before option block start lines - # - 2 dashes for each long option name - # - min. 2 spaces between option and explanation (gutter) - # - 5 characters (incl. space) for short option name - - # Now generate lines of help text. (If 80 columns were good enough - # for Jesus, then 78 columns are good enough for me!) 
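(Not part of the patch: the column arithmetic the comment block above describes, reduced to a standalone sketch; render_help is an illustrative name, not a distutils API.)

    def render_help(option_table, header="Options:"):
        # Long-option column width: longest name, minus any trailing '=',
        # plus 5 characters for a " (-x)" short form.
        max_opt = 0
        for long, short, _help in option_table:
            n = len(long) - (1 if long.endswith('=') else 0)
            if short is not None:
                n += 5
            max_opt = max(max_opt, n)

        lines = [header]
        for long, short, help in option_table:
            name = long.rstrip('=')
            if short is not None:
                name = "%s (-%s)" % (name, short)
            lines.append("  --%-*s  %s" % (max_opt, name, help))
        return lines

    print("\n".join(render_help([
        ('verbose', 'v', "run verbosely"),
        ('build-dir=', 'b', "directory to build in"),
    ])))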
-        line_width = 78
-        text_width = line_width - opt_width
-        big_indent = ' ' * opt_width
-        if header:
-            lines = [header]
-        else:
-            lines = ['Option summary:']
-
-        for option in self.option_table:
-            long, short, help = option[:3]
-            text = wrap_text(help, text_width)
-            if long[-1] == '=':
-                long = long[0:-1]
-
-            # Case 1: no short option at all (makes life easy)
-            if short is None:
-                if text:
-                    lines.append("  --%-*s  %s" % (max_opt, long, text[0]))
-                else:
-                    lines.append("  --%-*s  " % (max_opt, long))
-
-            # Case 2: we have a short option, so we have to include it
-            # just after the long option
-            else:
-                opt_names = "%s (-%s)" % (long, short)
-                if text:
-                    lines.append("  --%-*s  %s" %
-                                 (max_opt, opt_names, text[0]))
-                else:
-                    lines.append("  --%-*s" % (max_opt, opt_names))
-
-            for l in text[1:]:
-                lines.append(big_indent + l)
-        return lines
-
-    def print_help(self, header=None, file=None):
-        if file is None:
-            file = sys.stdout
-        for line in self.generate_help(header):
-            file.write(line + "\n")
-
-
-def fancy_getopt(options, negative_opt, object, args):
-    parser = FancyGetopt(options)
-    parser.set_negative_aliases(negative_opt)
-    return parser.getopt(args, object)
-
-
-WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
-
-def wrap_text(text, width):
-    """wrap_text(text : string, width : int) -> [string]
-
-    Split 'text' into multiple lines of no more than 'width' characters
-    each, and return the list of strings that results.
-    """
-    if text is None:
-        return []
-    if len(text) <= width:
-        return [text]
-
-    text = text.expandtabs()
-    text = text.translate(WS_TRANS)
-    chunks = re.split(r'( +|-+)', text)
-    chunks = [ch for ch in chunks if ch]  # ' - ' results in empty strings
-    lines = []
-
-    while chunks:
-        cur_line = []                   # list of chunks (to-be-joined)
-        cur_len = 0                     # length of current line
-
-        while chunks:
-            l = len(chunks[0])
-            if cur_len + l <= width:    # can squeeze (at least) this chunk in
-                cur_line.append(chunks[0])
-                del chunks[0]
-                cur_len = cur_len + l
-            else:                       # this line is full
-                # drop last chunk if all space
-                if cur_line and cur_line[-1][0] == ' ':
-                    del cur_line[-1]
-                break
-
-        if chunks:                      # any chunks left to process?
-            # if the current line is still empty, then we had a single
-            # chunk that's too big to fit on a line -- so we break
-            # down and break it up at the line width
-            if cur_len == 0:
-                cur_line.append(chunks[0][0:width])
-                chunks[0] = chunks[0][width:]
-
-            # all-whitespace chunks at the end of a line can be discarded
-            # (and we know from the re.split above that if a chunk has
-            # *any* whitespace, it is *all* whitespace)
-            if chunks[0][0] == ' ':
-                del chunks[0]
-
-        # and store this line in the list-of-all-lines -- as a single
-        # string, of course!
-        lines.append(''.join(cur_line))
-
-    return lines
-
-
-def translate_longopt(opt):
-    """Convert a long option name to a valid Python identifier by
-    changing "-" to "_".
-    """
-    return opt.translate(longopt_xlate)
-
-
-class OptionDummy:
-    """Dummy class just used as a place to hold command-line option
-    values as instance attributes."""
-
-    def __init__(self, options=[]):
-        """Create a new OptionDummy instance.  The attributes listed in
-        'options' will be initialized to None."""
-        for opt in options:
-            setattr(self, opt, None)
-
-
-if __name__ == "__main__":
-    text = """\
-Tra-la-la, supercalifragilisticexpialidocious.
-How *do* you spell that odd word, anyways?
-(Someone ask Mary -- she'll know [or she'll -say, "How should I know?"].)""" - - for w in (10, 20, 30, 40): - print("width: %d" % w) - print("\n".join(wrap_text(text, w))) - print() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/file_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/file_util.py deleted file mode 100755 index b3fee35..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/file_util.py +++ /dev/null @@ -1,238 +0,0 @@ -"""distutils.file_util - -Utility functions for operating on single files. -""" - -import os -from distutils.errors import DistutilsFileError -from distutils import log - -# for generating verbose output in 'copy_file()' -_copy_action = { None: 'copying', - 'hard': 'hard linking', - 'sym': 'symbolically linking' } - - -def _copy_file_contents(src, dst, buffer_size=16*1024): - """Copy the file 'src' to 'dst'; both must be filenames. Any error - opening either file, reading from 'src', or writing to 'dst', raises - DistutilsFileError. Data is read/written in chunks of 'buffer_size' - bytes (default 16k). No attempt is made to handle anything apart from - regular files. - """ - # Stolen from shutil module in the standard library, but with - # custom error-handling added. - fsrc = None - fdst = None - try: - try: - fsrc = open(src, 'rb') - except OSError as e: - raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror)) - - if os.path.exists(dst): - try: - os.unlink(dst) - except OSError as e: - raise DistutilsFileError( - "could not delete '%s': %s" % (dst, e.strerror)) - - try: - fdst = open(dst, 'wb') - except OSError as e: - raise DistutilsFileError( - "could not create '%s': %s" % (dst, e.strerror)) - - while True: - try: - buf = fsrc.read(buffer_size) - except OSError as e: - raise DistutilsFileError( - "could not read from '%s': %s" % (src, e.strerror)) - - if not buf: - break - - try: - fdst.write(buf) - except OSError as e: - raise DistutilsFileError( - "could not write to '%s': %s" % (dst, e.strerror)) - finally: - if fdst: - fdst.close() - if fsrc: - fsrc.close() - -def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0, - link=None, verbose=1, dry_run=0): - """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is - copied there with the same name; otherwise, it must be a filename. (If - the file exists, it will be ruthlessly clobbered.) If 'preserve_mode' - is true (the default), the file's mode (type and permission bits, or - whatever is analogous on the current platform) is copied. If - 'preserve_times' is true (the default), the last-modified and - last-access times are copied as well. If 'update' is true, 'src' will - only be copied if 'dst' does not exist, or if 'dst' does exist but is - older than 'src'. - - 'link' allows you to make hard links (os.link) or symbolic links - (os.symlink) instead of copying: set it to "hard" or "sym"; if it is - None (the default), files are copied. Don't set 'link' on systems that - don't support it: 'copy_file()' doesn't check if hard or symbolic - linking is available. If hardlink fails, falls back to - _copy_file_contents(). - - Under Mac OS, uses the native file copy function in macostools; on - other systems, uses '_copy_file_contents()' to copy file contents. 
-
-    Return a tuple (dest_name, copied): 'dest_name' is the actual name of
-    the output file, and 'copied' is true if the file was copied (or would
-    have been copied, if 'dry_run' true).
-    """
-    # XXX if the destination file already exists, we clobber it if
-    # copying, but blow up if linking.  Hmmm.  And I don't know what
-    # macostools.copyfile() does.  Should definitely be consistent, and
-    # should probably blow up if destination exists and we would be
-    # changing it (ie. it's not already a hard/soft link to src OR
-    # (not update) and (src newer than dst)).
-
-    from distutils.dep_util import newer
-    from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
-
-    if not os.path.isfile(src):
-        raise DistutilsFileError(
-            "can't copy '%s': doesn't exist or not a regular file" % src)
-
-    if os.path.isdir(dst):
-        dir = dst
-        dst = os.path.join(dst, os.path.basename(src))
-    else:
-        dir = os.path.dirname(dst)
-
-    if update and not newer(src, dst):
-        if verbose >= 1:
-            log.debug("not copying %s (output up-to-date)", src)
-        return (dst, 0)
-
-    try:
-        action = _copy_action[link]
-    except KeyError:
-        raise ValueError("invalid value '%s' for 'link' argument" % link)
-
-    if verbose >= 1:
-        if os.path.basename(dst) == os.path.basename(src):
-            log.info("%s %s -> %s", action, src, dir)
-        else:
-            log.info("%s %s -> %s", action, src, dst)
-
-    if dry_run:
-        return (dst, 1)
-
-    # If linking (hard or symbolic), use the appropriate system call
-    # (Unix only, of course, but that's the caller's responsibility)
-    elif link == 'hard':
-        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
-            try:
-                os.link(src, dst)
-                return (dst, 1)
-            except OSError:
-                # If hard linking fails, fall back on copying file
-                # (some special filesystems don't support hard linking
-                #  even under Unix, see issue #8876).
-                pass
-    elif link == 'sym':
-        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
-            os.symlink(src, dst)
-            return (dst, 1)
-
-    # Otherwise (non-Mac, not linking), copy the file contents and
-    # (optionally) copy the times and mode.
-    _copy_file_contents(src, dst)
-    if preserve_mode or preserve_times:
-        st = os.stat(src)
-
-        # According to David Ascher, utime() should be done
-        # before chmod() (at least under NT).
-        if preserve_times:
-            os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
-        if preserve_mode:
-            os.chmod(dst, S_IMODE(st[ST_MODE]))
-
-    return (dst, 1)
-
-
-# XXX I suspect this is Unix-specific -- need porting help!
-def move_file (src, dst,
-               verbose=1,
-               dry_run=0):
-
-    """Move a file 'src' to 'dst'.  If 'dst' is a directory, the file will
-    be moved into it with the same name; otherwise, 'src' is just renamed
-    to 'dst'.  Return the new full name of the file.
-
-    Handles cross-device moves on Unix using 'copy_file()'.  What about
-    other systems???
-    """
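(Not part of the patch: os.rename() cannot cross filesystems, so move_file() below falls back to copy-then-delete on EXDEV. A self-contained sketch of that fallback; move_file_sketch is an illustrative name and shutil.copy2 stands in for the deleted copy_file().)

    import errno
    import os
    import shutil

    def move_file_sketch(src, dst):
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EXDEV:
                raise
            shutil.copy2(src, dst)  # cross-device: copy, then remove source
            os.unlink(src)
        return dst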
- """ - from os.path import exists, isfile, isdir, basename, dirname - import errno - - if verbose >= 1: - log.info("moving %s -> %s", src, dst) - - if dry_run: - return dst - - if not isfile(src): - raise DistutilsFileError("can't move '%s': not a regular file" % src) - - if isdir(dst): - dst = os.path.join(dst, basename(src)) - elif exists(dst): - raise DistutilsFileError( - "can't move '%s': destination '%s' already exists" % - (src, dst)) - - if not isdir(dirname(dst)): - raise DistutilsFileError( - "can't move '%s': destination '%s' not a valid path" % - (src, dst)) - - copy_it = False - try: - os.rename(src, dst) - except OSError as e: - (num, msg) = e.args - if num == errno.EXDEV: - copy_it = True - else: - raise DistutilsFileError( - "couldn't move '%s' to '%s': %s" % (src, dst, msg)) - - if copy_it: - copy_file(src, dst, verbose=verbose) - try: - os.unlink(src) - except OSError as e: - (num, msg) = e.args - try: - os.unlink(dst) - except OSError: - pass - raise DistutilsFileError( - "couldn't move '%s' to '%s' by copy/delete: " - "delete '%s' failed: %s" - % (src, dst, src, msg)) - return dst - - -def write_file (filename, contents): - """Create a file with the specified name and write 'contents' (a - sequence of strings without line terminators) to it. - """ - f = open(filename, "w") - try: - for line in contents: - f.write(line + "\n") - finally: - f.close() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/filelist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/filelist.py deleted file mode 100755 index 82a7738..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/filelist.py +++ /dev/null @@ -1,355 +0,0 @@ -"""distutils.filelist - -Provides the FileList class, used for poking about the filesystem -and building lists of files. -""" - -import os -import re -import fnmatch -import functools - -from distutils.util import convert_path -from distutils.errors import DistutilsTemplateError, DistutilsInternalError -from distutils import log - - -class FileList: - """A list of files built by on exploring the filesystem and filtered by - applying various patterns to what we find there. - - Instance attributes: - dir - directory from which files will be taken -- only used if - 'allfiles' not supplied to constructor - files - list of filenames currently being built/filtered/manipulated - allfiles - complete list of files under consideration (ie. without any - filtering applied) - """ - - def __init__(self, warn=None, debug_print=None): - # ignore argument to FileList, but keep them for backwards - # compatibility - self.allfiles = None - self.files = [] - - def set_allfiles(self, allfiles): - self.allfiles = allfiles - - def findall(self, dir=os.curdir): - self.allfiles = findall(dir) - - def debug_print(self, msg): - """Print 'msg' to stdout if the global DEBUG (taken from the - DISTUTILS_DEBUG environment variable) flag is true. - """ - from distutils.debug import DEBUG - if DEBUG: - print(msg) - - # Collection methods - - def append(self, item): - self.files.append(item) - - def extend(self, items): - self.files.extend(items) - - def sort(self): - # Not a strict lexical sort! - sortable_files = sorted(map(os.path.split, self.files)) - self.files = [] - for sort_tuple in sortable_files: - self.files.append(os.path.join(*sort_tuple)) - - # Other miscellaneous utility methods - - def remove_duplicates(self): - # Assumes list has been sorted! 
-
-    # "File template" methods
-
-    def _parse_template_line(self, line):
-        words = line.split()
-        action = words[0]
-
-        patterns = dir = dir_pattern = None
-
-        if action in ('include', 'exclude',
-                      'global-include', 'global-exclude'):
-            if len(words) < 2:
-                raise DistutilsTemplateError(
-                      "'%s' expects <pattern1> <pattern2> ..." % action)
-            patterns = [convert_path(w) for w in words[1:]]
-        elif action in ('recursive-include', 'recursive-exclude'):
-            if len(words) < 3:
-                raise DistutilsTemplateError(
-                      "'%s' expects <dir> <pattern1> <pattern2> ..." % action)
-            dir = convert_path(words[1])
-            patterns = [convert_path(w) for w in words[2:]]
-        elif action in ('graft', 'prune'):
-            if len(words) != 2:
-                raise DistutilsTemplateError(
-                      "'%s' expects a single <dir_pattern>" % action)
-            dir_pattern = convert_path(words[1])
-        else:
-            raise DistutilsTemplateError("unknown action '%s'" % action)
-
-        return (action, patterns, dir, dir_pattern)
-
-    def process_template_line(self, line):
-        # Parse the line: split it up, make sure the right number of words
-        # is there, and return the relevant words.  'action' is always
-        # defined: it's the first word of the line.  Which of the other
-        # three are defined depends on the action; it'll be either
-        # patterns, (dir and patterns), or (dir_pattern).
-        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
-
-        # OK, now we know that the action is valid and we have the
-        # right number of words on the line for that action -- so we
-        # can proceed with minimal error-checking.
-        if action == 'include':
-            self.debug_print("include " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.include_pattern(pattern, anchor=1):
-                    log.warn("warning: no files found matching '%s'",
-                             pattern)
-
-        elif action == 'exclude':
-            self.debug_print("exclude " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.exclude_pattern(pattern, anchor=1):
-                    log.warn(("warning: no previously-included files "
-                              "found matching '%s'"), pattern)
-
-        elif action == 'global-include':
-            self.debug_print("global-include " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.include_pattern(pattern, anchor=0):
-                    log.warn(("warning: no files found matching '%s' "
-                              "anywhere in distribution"), pattern)
-
-        elif action == 'global-exclude':
-            self.debug_print("global-exclude " + ' '.join(patterns))
-            for pattern in patterns:
-                if not self.exclude_pattern(pattern, anchor=0):
-                    log.warn(("warning: no previously-included files matching "
-                              "'%s' found anywhere in distribution"),
-                             pattern)
-
-        elif action == 'recursive-include':
-            self.debug_print("recursive-include %s %s" %
-                             (dir, ' '.join(patterns)))
-            for pattern in patterns:
-                if not self.include_pattern(pattern, prefix=dir):
-                    msg = (
-                        "warning: no files found matching '%s' "
-                        "under directory '%s'"
-                    )
-                    log.warn(msg, pattern, dir)
-
-        elif action == 'recursive-exclude':
-            self.debug_print("recursive-exclude %s %s" %
-                             (dir, ' '.join(patterns)))
-            for pattern in patterns:
-                if not self.exclude_pattern(pattern, prefix=dir):
-                    log.warn(("warning: no previously-included files matching "
-                              "'%s' found under directory '%s'"),
-                             pattern, dir)
-
-        elif action == 'graft':
-            self.debug_print("graft " + dir_pattern)
-            if not self.include_pattern(None, prefix=dir_pattern):
-                log.warn("warning: no directories found matching '%s'",
-                         dir_pattern)
-
-        elif action == 'prune':
-            self.debug_print("prune " + dir_pattern)
-            if not self.exclude_pattern(None, prefix=dir_pattern):
-
log.warn(("no previously-included directories found " - "matching '%s'"), dir_pattern) - else: - raise DistutilsInternalError( - "this cannot happen: invalid action '%s'" % action) - - # Filtering/selection methods - - def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): - """Select strings (presumably filenames) from 'self.files' that - match 'pattern', a Unix-style wildcard (glob) pattern. Patterns - are not quite the same as implemented by the 'fnmatch' module: '*' - and '?' match non-special characters, where "special" is platform- - dependent: slash on Unix; colon, slash, and backslash on - DOS/Windows; and colon on Mac OS. - - If 'anchor' is true (the default), then the pattern match is more - stringent: "*.py" will match "foo.py" but not "foo/bar.py". If - 'anchor' is false, both of these will match. - - If 'prefix' is supplied, then only filenames starting with 'prefix' - (itself a pattern) and ending with 'pattern', with anything in between - them, will match. 'anchor' is ignored in this case. - - If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and - 'pattern' is assumed to be either a string containing a regex or a - regex object -- no translation is done, the regex is just compiled - and used as-is. - - Selected strings will be added to self.files. - - Return True if files are found, False otherwise. - """ - # XXX docstring lying about what the special chars are? - files_found = False - pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) - self.debug_print("include_pattern: applying regex r'%s'" % - pattern_re.pattern) - - # delayed loading of allfiles list - if self.allfiles is None: - self.findall() - - for name in self.allfiles: - if pattern_re.search(name): - self.debug_print(" adding " + name) - self.files.append(name) - files_found = True - return files_found - - def exclude_pattern( - self, pattern, anchor=1, prefix=None, is_regex=0): - """Remove strings (presumably filenames) from 'files' that match - 'pattern'. Other parameters are the same as for - 'include_pattern()', above. - The list 'self.files' is modified in place. - Return True if files are found, False otherwise. - """ - files_found = False - pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) - self.debug_print("exclude_pattern: applying regex r'%s'" % - pattern_re.pattern) - for i in range(len(self.files)-1, -1, -1): - if pattern_re.search(self.files[i]): - self.debug_print(" removing " + self.files[i]) - del self.files[i] - files_found = True - return files_found - - -# Utility functions - -def _find_all_simple(path): - """ - Find all files under 'path' - """ - all_unique = _UniqueDirs.filter(os.walk(path, followlinks=True)) - results = ( - os.path.join(base, file) - for base, dirs, files in all_unique - for file in files - ) - return filter(os.path.isfile, results) - - -class _UniqueDirs(set): - """ - Exclude previously-seen dirs from walk results, - avoiding infinite recursion. - Ref https://bugs.python.org/issue44497. - """ - def __call__(self, walk_item): - """ - Given an item from an os.walk result, determine - if the item represents a unique dir for this instance - and if not, prevent further traversal. 
- """ - base, dirs, files = walk_item - stat = os.stat(base) - candidate = stat.st_dev, stat.st_ino - found = candidate in self - if found: - del dirs[:] - self.add(candidate) - return not found - - @classmethod - def filter(cls, items): - return filter(cls(), items) - - -def findall(dir=os.curdir): - """ - Find all files under 'dir' and return the list of full filenames. - Unless dir is '.', return full filenames with dir prepended. - """ - files = _find_all_simple(dir) - if dir == os.curdir: - make_rel = functools.partial(os.path.relpath, start=dir) - files = map(make_rel, files) - return list(files) - - -def glob_to_re(pattern): - """Translate a shell-like glob pattern to a regular expression; return - a string containing the regex. Differs from 'fnmatch.translate()' in - that '*' does not match "special characters" (which are - platform-specific). - """ - pattern_re = fnmatch.translate(pattern) - - # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which - # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, - # and by extension they shouldn't match such "special characters" under - # any OS. So change all non-escaped dots in the RE to match any - # character except the special characters (currently: just os.sep). - sep = os.sep - if os.sep == '\\': - # we're using a regex to manipulate a regex, so we need - # to escape the backslash twice - sep = r'\\\\' - escaped = r'\1[^%s]' % sep - pattern_re = re.sub(r'((?= self.threshold: - if args: - msg = msg % args - if level in (WARN, ERROR, FATAL): - stream = sys.stderr - else: - stream = sys.stdout - try: - stream.write('%s\n' % msg) - except UnicodeEncodeError: - # emulate backslashreplace error handler - encoding = stream.encoding - msg = msg.encode(encoding, "backslashreplace").decode(encoding) - stream.write('%s\n' % msg) - stream.flush() - - def log(self, level, msg, *args): - self._log(level, msg, args) - - def debug(self, msg, *args): - self._log(DEBUG, msg, args) - - def info(self, msg, *args): - self._log(INFO, msg, args) - - def warn(self, msg, *args): - self._log(WARN, msg, args) - - def error(self, msg, *args): - self._log(ERROR, msg, args) - - def fatal(self, msg, *args): - self._log(FATAL, msg, args) - - -_global_log = Log() -log = _global_log.log -debug = _global_log.debug -info = _global_log.info -warn = _global_log.warn -error = _global_log.error -fatal = _global_log.fatal - - -def set_threshold(level): - # return the old threshold for use from tests - old = _global_log.threshold - _global_log.threshold = level - return old - - -def set_verbosity(v): - if v <= 0: - set_threshold(WARN) - elif v == 1: - set_threshold(INFO) - elif v >= 2: - set_threshold(DEBUG) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvc9compiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvc9compiler.py deleted file mode 100755 index 6b62738..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvc9compiler.py +++ /dev/null @@ -1,788 +0,0 @@ -"""distutils.msvc9compiler - -Contains MSVCCompiler, an implementation of the abstract CCompiler class -for the Microsoft Visual Studio 2008. - -The module is compatible with VS 2005 and VS 2008. You can find legacy support -for older versions of VS in distutils.msvccompiler. 
-""" - -# Written by Perry Stoll -# hacked by Robin Becker and Thomas Heller to do a better job of -# finding DevStudio (through the registry) -# ported to VS2005 and VS 2008 by Christian Heimes - -import os -import subprocess -import sys -import re - -from distutils.errors import DistutilsExecError, DistutilsPlatformError, \ - CompileError, LibError, LinkError -from distutils.ccompiler import CCompiler, gen_lib_options -from distutils import log -from distutils.util import get_platform - -import winreg - -RegOpenKeyEx = winreg.OpenKeyEx -RegEnumKey = winreg.EnumKey -RegEnumValue = winreg.EnumValue -RegError = winreg.error - -HKEYS = (winreg.HKEY_USERS, - winreg.HKEY_CURRENT_USER, - winreg.HKEY_LOCAL_MACHINE, - winreg.HKEY_CLASSES_ROOT) - -NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32) -if NATIVE_WIN64: - # Visual C++ is a 32-bit application, so we need to look in - # the corresponding registry branch, if we're running a - # 64-bit Python on Win64 - VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f" - WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows" - NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework" -else: - VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f" - WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows" - NET_BASE = r"Software\Microsoft\.NETFramework" - -# A map keyed by get_platform() return values to values accepted by -# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is -# the param to cross-compile on x86 targeting amd64.) -PLAT_TO_VCVARS = { - 'win32' : 'x86', - 'win-amd64' : 'amd64', -} - -class Reg: - """Helper class to read values from the registry - """ - - def get_value(cls, path, key): - for base in HKEYS: - d = cls.read_values(base, path) - if d and key in d: - return d[key] - raise KeyError(key) - get_value = classmethod(get_value) - - def read_keys(cls, base, key): - """Return list of registry keys.""" - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - L = [] - i = 0 - while True: - try: - k = RegEnumKey(handle, i) - except RegError: - break - L.append(k) - i += 1 - return L - read_keys = classmethod(read_keys) - - def read_values(cls, base, key): - """Return dict of registry keys and values. - - All names are converted to lowercase. 
- """ - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - d = {} - i = 0 - while True: - try: - name, value, type = RegEnumValue(handle, i) - except RegError: - break - name = name.lower() - d[cls.convert_mbcs(name)] = cls.convert_mbcs(value) - i += 1 - return d - read_values = classmethod(read_values) - - def convert_mbcs(s): - dec = getattr(s, "decode", None) - if dec is not None: - try: - s = dec("mbcs") - except UnicodeError: - pass - return s - convert_mbcs = staticmethod(convert_mbcs) - -class MacroExpander: - - def __init__(self, version): - self.macros = {} - self.vsbase = VS_BASE % version - self.load_macros(version) - - def set_macro(self, macro, path, key): - self.macros["$(%s)" % macro] = Reg.get_value(path, key) - - def load_macros(self, version): - self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir") - self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir") - self.set_macro("FrameworkDir", NET_BASE, "installroot") - try: - if version >= 8.0: - self.set_macro("FrameworkSDKDir", NET_BASE, - "sdkinstallrootv2.0") - else: - raise KeyError("sdkinstallrootv2.0") - except KeyError: - raise DistutilsPlatformError( - """Python was built with Visual Studio 2008; -extensions must be built with a compiler than can generate compatible binaries. -Visual Studio 2008 was not found on this system. If you have Cygwin installed, -you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") - - if version >= 9.0: - self.set_macro("FrameworkVersion", self.vsbase, "clr version") - self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder") - else: - p = r"Software\Microsoft\NET Framework Setup\Product" - for base in HKEYS: - try: - h = RegOpenKeyEx(base, p) - except RegError: - continue - key = RegEnumKey(h, 0) - d = Reg.get_value(base, r"%s\%s" % (p, key)) - self.macros["$(FrameworkVersion)"] = d["version"] - - def sub(self, s): - for k, v in self.macros.items(): - s = s.replace(k, v) - return s - -def get_build_version(): - """Return the version of MSVC that was used to build Python. - - For Python 2.3 and up, the version number is included in - sys.version. For earlier versions, assume the compiler is MSVC 6. - """ - prefix = "MSC v." - i = sys.version.find(prefix) - if i == -1: - return 6 - i = i + len(prefix) - s, rest = sys.version[i:].split(" ", 1) - majorVersion = int(s[:-2]) - 6 - if majorVersion >= 13: - # v13 was skipped and should be v14 - majorVersion += 1 - minorVersion = int(s[2:3]) / 10.0 - # I don't think paths are affected by minor version in version 6 - if majorVersion == 6: - minorVersion = 0 - if majorVersion >= 6: - return majorVersion + minorVersion - # else we don't know what version of the compiler this is - return None - -def normalize_and_reduce_paths(paths): - """Return a list of normalized paths with duplicates removed. - - The current order of paths is maintained. - """ - # Paths are normalized so things like: /a and /a/ aren't both preserved. - reduced_paths = [] - for p in paths: - np = os.path.normpath(p) - # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. - if np not in reduced_paths: - reduced_paths.append(np) - return reduced_paths - -def removeDuplicates(variable): - """Remove duplicate values of an environment variable. 
- """ - oldList = variable.split(os.pathsep) - newList = [] - for i in oldList: - if i not in newList: - newList.append(i) - newVariable = os.pathsep.join(newList) - return newVariable - -def find_vcvarsall(version): - """Find the vcvarsall.bat file - - At first it tries to find the productdir of VS 2008 in the registry. If - that fails it falls back to the VS90COMNTOOLS env var. - """ - vsbase = VS_BASE % version - try: - productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, - "productdir") - except KeyError: - log.debug("Unable to find productdir in registry") - productdir = None - - if not productdir or not os.path.isdir(productdir): - toolskey = "VS%0.f0COMNTOOLS" % version - toolsdir = os.environ.get(toolskey, None) - - if toolsdir and os.path.isdir(toolsdir): - productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC") - productdir = os.path.abspath(productdir) - if not os.path.isdir(productdir): - log.debug("%s is not a valid directory" % productdir) - return None - else: - log.debug("Env var %s is not set or invalid" % toolskey) - if not productdir: - log.debug("No productdir found") - return None - vcvarsall = os.path.join(productdir, "vcvarsall.bat") - if os.path.isfile(vcvarsall): - return vcvarsall - log.debug("Unable to find vcvarsall.bat") - return None - -def query_vcvarsall(version, arch="x86"): - """Launch vcvarsall.bat and read the settings from its environment - """ - vcvarsall = find_vcvarsall(version) - interesting = {"include", "lib", "libpath", "path"} - result = {} - - if vcvarsall is None: - raise DistutilsPlatformError("Unable to find vcvarsall.bat") - log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version) - popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - try: - stdout, stderr = popen.communicate() - if popen.wait() != 0: - raise DistutilsPlatformError(stderr.decode("mbcs")) - - stdout = stdout.decode("mbcs") - for line in stdout.split("\n"): - line = Reg.convert_mbcs(line) - if '=' not in line: - continue - line = line.strip() - key, value = line.split('=', 1) - key = key.lower() - if key in interesting: - if value.endswith(os.pathsep): - value = value[:-1] - result[key] = removeDuplicates(value) - - finally: - popen.stdout.close() - popen.stderr.close() - - if len(result) != len(interesting): - raise ValueError(str(list(result.keys()))) - - return result - -# More globals -VERSION = get_build_version() -# MACROS = MacroExpander(VERSION) - -class MSVCCompiler(CCompiler) : - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. 
- src_extensions = (_c_extensions + _cpp_extensions + - _rc_extensions + _mc_extensions) - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) - self.__version = VERSION - self.__root = r"Software\Microsoft\VisualStudio" - # self.__macros = MACROS - self.__paths = [] - # target platform (.plat_name is consistent with 'bdist') - self.plat_name = None - self.__arch = None # deprecated name - self.initialized = False - - def initialize(self, plat_name=None): - # multi-init means we would need to check platform same each time... - assert not self.initialized, "don't init multiple times" - if self.__version < 8.0: - raise DistutilsPlatformError("VC %0.1f is not supported by this module" % self.__version) - if plat_name is None: - plat_name = get_platform() - # sanity check for platforms to prevent obscure errors later. - ok_plats = 'win32', 'win-amd64' - if plat_name not in ok_plats: - raise DistutilsPlatformError("--plat-name must be one of %s" % - (ok_plats,)) - - if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): - # Assume that the SDK set up everything alright; don't try to be - # smarter - self.cc = "cl.exe" - self.linker = "link.exe" - self.lib = "lib.exe" - self.rc = "rc.exe" - self.mc = "mc.exe" - else: - # On x86, 'vcvars32.bat amd64' creates an env that doesn't work; - # to cross compile, you use 'x86_amd64'. - # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross - # compile use 'x86' (ie, it runs the x86 compiler directly) - if plat_name == get_platform() or plat_name == 'win32': - # native build or cross-compile to win32 - plat_spec = PLAT_TO_VCVARS[plat_name] - else: - # cross compile from win32 -> some 64bit - plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \ - PLAT_TO_VCVARS[plat_name] - - vc_env = query_vcvarsall(VERSION, plat_spec) - - self.__paths = vc_env['path'].split(os.pathsep) - os.environ['lib'] = vc_env['lib'] - os.environ['include'] = vc_env['include'] - - if len(self.__paths) == 0: - raise DistutilsPlatformError("Python was built with %s, " - "and extensions need to be built with the same " - "version of the compiler, but it isn't installed." 
- % self.__product) - - self.cc = self.find_exe("cl.exe") - self.linker = self.find_exe("link.exe") - self.lib = self.find_exe("lib.exe") - self.rc = self.find_exe("rc.exe") # resource compiler - self.mc = self.find_exe("mc.exe") # message compiler - #self.set_path_env_var('lib') - #self.set_path_env_var('include') - - # extend the MSVC path with the current path - try: - for p in os.environ['path'].split(';'): - self.__paths.append(p) - except KeyError: - pass - self.__paths = normalize_and_reduce_paths(self.__paths) - os.environ['path'] = ";".join(self.__paths) - - self.preprocess_options = None - if self.__arch == "x86": - self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Z7', '/D_DEBUG'] - else: - # Win64 - self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GS-' , - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', - '/Z7', '/D_DEBUG'] - - self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] - if self.__version >= 7: - self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' - ] - self.ldflags_static = [ '/nologo'] - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - def object_filenames(self, - source_filenames, - strip_dir=0, - output_dir=''): - # Copied from ccompiler.py, extended to return .res as 'object'-file - # for .rc input file - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - (base, ext) = os.path.splitext (src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if ext not in self.src_extensions: - # Better to raise an exception instead of silently continuing - # and later complain about sources and targets having - # different lengths - raise CompileError ("Don't know how to compile %s" % src_name) - if strip_dir: - base = os.path.basename (base) - if ext in self._rc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - elif ext in self._mc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile(output_dir, macros, include_dirs, - sources, depends, extra_postargs) - macros, objects, extra_postargs, pp_opts, build = compile_info - - compile_opts = extra_preargs or [] - compile_opts.append ('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + - [output_opt] + [input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - 
continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. - h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc] + - ['-h', h_dir, '-r', rc_dir] + [src]) - base, _ = os.path.splitext (os.path.basename (src)) - rc_file = os.path.join (rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc] + - ["/fo" + obj] + [rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError("Don't know how to compile %s to %s" - % (src, obj)) - - output_opt = "/Fo" + obj - try: - self.spawn([self.cc] + compile_opts + pp_opts + - [input_opt, output_opt] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - - def create_static_lib(self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, - output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? - try: - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - (libraries, library_dirs, runtime_library_dirs) = fixed_args - - if runtime_library_dirs: - self.warn ("I don't know what to do with 'runtime_library_dirs': " - + str (runtime_library_dirs)) - - lib_opts = gen_lib_options(self, - library_dirs, runtime_library_dirs, - libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - if target_desc == CCompiler.EXECUTABLE: - if debug: - ldflags = self.ldflags_shared_debug[1:] - else: - ldflags = self.ldflags_shared[1:] - else: - if debug: - ldflags = self.ldflags_shared_debug - else: - ldflags = self.ldflags_shared - - export_opts = [] - for sym in (export_symbols or []): - export_opts.append("/EXPORT:" + sym) - - ld_args = (ldflags + lib_opts + export_opts + - objects + ['/OUT:' + output_filename]) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. 
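
The bookkeeping in link() reduces to assembling one linker command line; the sketch below (illustrative names, mine, not a drop-in replacement) shows the shape of the argument list for a DLL, including the /IMPLIB placement next to the objects that the comment above explains:

    import os

    def dll_link_args(objects, output_filename, export_symbols=(), libraries=()):
        args = ["/DLL", "/nologo", "/INCREMENTAL:NO"]        # ldflags_shared
        args += [lib + ".lib" for lib in libraries]
        args += ["/EXPORT:" + sym for sym in export_symbols]
        args += list(objects) + ["/OUT:" + output_filename]
        # keep the generated import library (.lib/.exp) in the build temp dir
        dll_name, _ = os.path.splitext(os.path.basename(output_filename))
        args.append("/IMPLIB:" + os.path.join(os.path.dirname(objects[0]),
                                              dll_name + ".lib"))
        return args

    # e.g. ["link.exe"] + dll_link_args(["build\\spam.obj"], "spam.pyd",
    #                                   export_symbols=["PyInit_spam"])
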
-            build_temp = os.path.dirname(objects[0])
-            if export_symbols is not None:
-                (dll_name, dll_ext) = os.path.splitext(
-                    os.path.basename(output_filename))
-                implib_file = os.path.join(
-                    build_temp,
-                    self.library_filename(dll_name))
-                ld_args.append ('/IMPLIB:' + implib_file)
-
-            self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
-
-            if extra_preargs:
-                ld_args[:0] = extra_preargs
-            if extra_postargs:
-                ld_args.extend(extra_postargs)
-
-            self.mkpath(os.path.dirname(output_filename))
-            try:
-                self.spawn([self.linker] + ld_args)
-            except DistutilsExecError as msg:
-                raise LinkError(msg)
-
-            # embed the manifest
-            # XXX - this is somewhat fragile - if mt.exe fails, distutils
-            # will still consider the DLL up-to-date, but it will not have a
-            # manifest.  Maybe we should link to a temp file?  OTOH, that
-            # implies a build environment error that shouldn't go undetected.
-            mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
-            if mfinfo is not None:
-                mffilename, mfid = mfinfo
-                out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
-                try:
-                    self.spawn(['mt.exe', '-nologo', '-manifest',
-                                mffilename, out_arg])
-                except DistutilsExecError as msg:
-                    raise LinkError(msg)
-        else:
-            log.debug("skipping %s (up-to-date)", output_filename)
-
-    def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
-        # If we need a manifest at all, an embedded manifest is recommended.
-        # See MSDN article titled
-        # "How to: Embed a Manifest Inside a C/C++ Application"
-        # (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
-        # Ask the linker to generate the manifest in the temp dir, so
-        # we can check it, and possibly embed it, later.
-        temp_manifest = os.path.join(
-                build_temp,
-                os.path.basename(output_filename) + ".manifest")
-        ld_args.append('/MANIFESTFILE:' + temp_manifest)
-
-    def manifest_get_embed_info(self, target_desc, ld_args):
-        # If a manifest should be embedded, return a tuple of
-        # (manifest_filename, resource_id).  Returns None if no manifest
-        # should be embedded.  See http://bugs.python.org/issue7833 for why
-        # we want to avoid any manifest for extension modules if we can)
-        for arg in ld_args:
-            if arg.startswith("/MANIFESTFILE:"):
-                temp_manifest = arg.split(":", 1)[1]
-                break
-        else:
-            # no /MANIFESTFILE so nothing to do.
-            return None
-        if target_desc == CCompiler.EXECUTABLE:
-            # by default, executables always get the manifest with the
-            # CRT referenced.
-            mfid = 1
-        else:
-            # Extension modules try and avoid any manifest if possible.
-            mfid = 2
-        temp_manifest = self._remove_visual_c_ref(temp_manifest)
-        if temp_manifest is None:
-            return None
-        return temp_manifest, mfid
-
-    def _remove_visual_c_ref(self, manifest_file):
-        try:
-            # Remove references to the Visual C runtime, so they will
-            # fall through to the Visual C dependency of Python.exe.
-            # This way, when installed for a restricted user (e.g.
-            # runtimes are not in WinSxS folder, but in Python's own
-            # folder), the runtimes do not need to be in every folder
-            # with .pyd's.
-            # Returns either the filename of the modified manifest or
-            # None if no manifest should be embedded.
-            manifest_f = open(manifest_file)
-            try:
-                manifest_buf = manifest_f.read()
-            finally:
-                manifest_f.close()
-            pattern = re.compile(
-                r"""<assemblyIdentity.*?name=("|')Microsoft\."""
-                r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
-                re.DOTALL)
-            manifest_buf = re.sub(pattern, "", manifest_buf)
-            pattern = r"<dependentAssembly>\s*</dependentAssembly>"
-            manifest_buf = re.sub(pattern, "", manifest_buf)
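
To make the two substitutions above concrete, here is a tiny self-contained example (mine, not from the patch) of a CRT reference being pruned and the emptied dependentAssembly pair collapsing:

    import re

    manifest = (
        '<dependentAssembly>'
        '<assemblyIdentity type="win32" name="Microsoft.VC90.CRT" '
        'version="9.0.21022.8" processorArchitecture="x86"/>'
        '</dependentAssembly>'
    )
    crt_ref = re.compile(
        r"""<assemblyIdentity.*?name=("|')Microsoft\."""
        r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""", re.DOTALL)
    stripped = re.sub(crt_ref, "", manifest)
    stripped = re.sub(r"<dependentAssembly>\s*</dependentAssembly>", "", stripped)
    assert stripped == ""    # nothing left, so no manifest gets embedded
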
-            # Now see if any other assemblies are referenced - if not, we
-            # don't want a manifest embedded.
-            pattern = re.compile(
-                r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
-                r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
-            if re.search(pattern, manifest_buf) is None:
-                return None
-
-            manifest_f = open(manifest_file, 'w')
-            try:
-                manifest_f.write(manifest_buf)
-                return manifest_file
-            finally:
-                manifest_f.close()
-        except OSError:
-            pass
-
-    # -- Miscellaneous methods -----------------------------------------
-    # These are all used by the 'gen_lib_options() function, in
-    # ccompiler.py.
-
-    def library_dir_option(self, dir):
-        return "/LIBPATH:" + dir
-
-    def runtime_library_dir_option(self, dir):
-        raise DistutilsPlatformError(
-            "don't know how to set runtime library search path for MSVC++")
-
-    def library_option(self, lib):
-        return self.library_filename(lib)
-
-
-    def find_library_file(self, dirs, lib, debug=0):
-        # Prefer a debugging library if found (and requested), but deal
-        # with it if we don't have one.
-        if debug:
-            try_names = [lib + "_d", lib]
-        else:
-            try_names = [lib]
-        for dir in dirs:
-            for name in try_names:
-                libfile = os.path.join(dir, self.library_filename (name))
-                if os.path.exists(libfile):
-                    return libfile
-        else:
-            # Oops, didn't find it in *any* of 'dirs'
-            return None
-
-    # Helper methods for using the MSVC registry settings
-
-    def find_exe(self, exe):
-        """Return path to an MSVC executable program.
-
-        Tries to find the program in several places: first, one of the
-        MSVC program search paths from the registry; next, the directories
-        in the PATH environment variable.  If any of those work, return an
-        absolute path that is known to exist.  If none of them work, just
-        return the original program name, 'exe'.
-        """
-        for p in self.__paths:
-            fn = os.path.join(os.path.abspath(p), exe)
-            if os.path.isfile(fn):
-                return fn
-
-        # didn't find it; try existing path
-        for p in os.environ['Path'].split(';'):
-            fn = os.path.join(os.path.abspath(p),exe)
-            if os.path.isfile(fn):
-                return fn
-
-        return exe
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvccompiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvccompiler.py
deleted file mode 100755
index e1367b8..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/msvccompiler.py
+++ /dev/null
@@ -1,643 +0,0 @@
-"""distutils.msvccompiler
-
-Contains MSVCCompiler, an implementation of the abstract CCompiler class
-for the Microsoft Visual Studio.
-""" - -# Written by Perry Stoll -# hacked by Robin Becker and Thomas Heller to do a better job of -# finding DevStudio (through the registry) - -import sys, os -from distutils.errors import \ - DistutilsExecError, DistutilsPlatformError, \ - CompileError, LibError, LinkError -from distutils.ccompiler import \ - CCompiler, gen_lib_options -from distutils import log - -_can_read_reg = False -try: - import winreg - - _can_read_reg = True - hkey_mod = winreg - - RegOpenKeyEx = winreg.OpenKeyEx - RegEnumKey = winreg.EnumKey - RegEnumValue = winreg.EnumValue - RegError = winreg.error - -except ImportError: - try: - import win32api - import win32con - _can_read_reg = True - hkey_mod = win32con - - RegOpenKeyEx = win32api.RegOpenKeyEx - RegEnumKey = win32api.RegEnumKey - RegEnumValue = win32api.RegEnumValue - RegError = win32api.error - except ImportError: - log.info("Warning: Can't read registry to find the " - "necessary compiler setting\n" - "Make sure that Python modules winreg, " - "win32api or win32con are installed.") - pass - -if _can_read_reg: - HKEYS = (hkey_mod.HKEY_USERS, - hkey_mod.HKEY_CURRENT_USER, - hkey_mod.HKEY_LOCAL_MACHINE, - hkey_mod.HKEY_CLASSES_ROOT) - -def read_keys(base, key): - """Return list of registry keys.""" - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - L = [] - i = 0 - while True: - try: - k = RegEnumKey(handle, i) - except RegError: - break - L.append(k) - i += 1 - return L - -def read_values(base, key): - """Return dict of registry keys and values. - - All names are converted to lowercase. - """ - try: - handle = RegOpenKeyEx(base, key) - except RegError: - return None - d = {} - i = 0 - while True: - try: - name, value, type = RegEnumValue(handle, i) - except RegError: - break - name = name.lower() - d[convert_mbcs(name)] = convert_mbcs(value) - i += 1 - return d - -def convert_mbcs(s): - dec = getattr(s, "decode", None) - if dec is not None: - try: - s = dec("mbcs") - except UnicodeError: - pass - return s - -class MacroExpander: - def __init__(self, version): - self.macros = {} - self.load_macros(version) - - def set_macro(self, macro, path, key): - for base in HKEYS: - d = read_values(base, path) - if d: - self.macros["$(%s)" % macro] = d[key] - break - - def load_macros(self, version): - vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version - self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir") - self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir") - net = r"Software\Microsoft\.NETFramework" - self.set_macro("FrameworkDir", net, "installroot") - try: - if version > 7.0: - self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1") - else: - self.set_macro("FrameworkSDKDir", net, "sdkinstallroot") - except KeyError as exc: # - raise DistutilsPlatformError( - """Python was built with Visual Studio 2003; -extensions must be built with a compiler than can generate compatible binaries. -Visual Studio 2003 was not found on this system. If you have Cygwin installed, -you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") - - p = r"Software\Microsoft\NET Framework Setup\Product" - for base in HKEYS: - try: - h = RegOpenKeyEx(base, p) - except RegError: - continue - key = RegEnumKey(h, 0) - d = read_values(base, r"%s\%s" % (p, key)) - self.macros["$(FrameworkVersion)"] = d["version"] - - def sub(self, s): - for k, v in self.macros.items(): - s = s.replace(k, v) - return s - -def get_build_version(): - """Return the version of MSVC that was used to build Python. 
- - For Python 2.3 and up, the version number is included in - sys.version. For earlier versions, assume the compiler is MSVC 6. - """ - prefix = "MSC v." - i = sys.version.find(prefix) - if i == -1: - return 6 - i = i + len(prefix) - s, rest = sys.version[i:].split(" ", 1) - majorVersion = int(s[:-2]) - 6 - if majorVersion >= 13: - # v13 was skipped and should be v14 - majorVersion += 1 - minorVersion = int(s[2:3]) / 10.0 - # I don't think paths are affected by minor version in version 6 - if majorVersion == 6: - minorVersion = 0 - if majorVersion >= 6: - return majorVersion + minorVersion - # else we don't know what version of the compiler this is - return None - -def get_build_architecture(): - """Return the processor architecture. - - Possible results are "Intel" or "AMD64". - """ - - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return "Intel" - j = sys.version.find(")", i) - return sys.version[i+len(prefix):j] - -def normalize_and_reduce_paths(paths): - """Return a list of normalized paths with duplicates removed. - - The current order of paths is maintained. - """ - # Paths are normalized so things like: /a and /a/ aren't both preserved. - reduced_paths = [] - for p in paths: - np = os.path.normpath(p) - # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set. - if np not in reduced_paths: - reduced_paths.append(np) - return reduced_paths - - -class MSVCCompiler(CCompiler) : - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. - executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = (_c_extensions + _cpp_extensions + - _rc_extensions + _mc_extensions) - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) - self.__version = get_build_version() - self.__arch = get_build_architecture() - if self.__arch == "Intel": - # x86 - if self.__version >= 7: - self.__root = r"Software\Microsoft\VisualStudio" - self.__macros = MacroExpander(self.__version) - else: - self.__root = r"Software\Microsoft\Devstudio" - self.__product = "Visual Studio version %s" % self.__version - else: - # Win64. 
Assume this was built with the platform SDK - self.__product = "Microsoft SDK compiler %s" % (self.__version + 6) - - self.initialized = False - - def initialize(self): - self.__paths = [] - if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): - # Assume that the SDK set up everything alright; don't try to be - # smarter - self.cc = "cl.exe" - self.linker = "link.exe" - self.lib = "lib.exe" - self.rc = "rc.exe" - self.mc = "mc.exe" - else: - self.__paths = self.get_msvc_paths("path") - - if len(self.__paths) == 0: - raise DistutilsPlatformError("Python was built with %s, " - "and extensions need to be built with the same " - "version of the compiler, but it isn't installed." - % self.__product) - - self.cc = self.find_exe("cl.exe") - self.linker = self.find_exe("link.exe") - self.lib = self.find_exe("lib.exe") - self.rc = self.find_exe("rc.exe") # resource compiler - self.mc = self.find_exe("mc.exe") # message compiler - self.set_path_env_var('lib') - self.set_path_env_var('include') - - # extend the MSVC path with the current path - try: - for p in os.environ['path'].split(';'): - self.__paths.append(p) - except KeyError: - pass - self.__paths = normalize_and_reduce_paths(self.__paths) - os.environ['path'] = ";".join(self.__paths) - - self.preprocess_options = None - if self.__arch == "Intel": - self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GX' , - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX', - '/Z7', '/D_DEBUG'] - else: - # Win64 - self.compile_options = [ '/nologo', '/O2', '/MD', '/W3', '/GS-' , - '/DNDEBUG'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', - '/Z7', '/D_DEBUG'] - - self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] - if self.__version >= 7: - self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' - ] - else: - self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG' - ] - self.ldflags_static = [ '/nologo'] - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - def object_filenames(self, - source_filenames, - strip_dir=0, - output_dir=''): - # Copied from ccompiler.py, extended to return .res as 'object'-file - # for .rc input file - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - (base, ext) = os.path.splitext (src_name) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if ext not in self.src_extensions: - # Better to raise an exception instead of silently continuing - # and later complain about sources and targets having - # different lengths - raise CompileError ("Don't know how to compile %s" % src_name) - if strip_dir: - base = os.path.basename (base) - if ext in self._rc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - elif ext in self._mc_extensions: - obj_names.append (os.path.join (output_dir, - base + self.res_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - - def compile(self, sources, - output_dir=None, macros=None, include_dirs=None, debug=0, - extra_preargs=None, extra_postargs=None, depends=None): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile(output_dir, macros, include_dirs, - sources, depends, extra_postargs) - macros, objects, extra_postargs, pp_opts, build 
= compile_info - - compile_opts = extra_preargs or [] - compile_opts.append ('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + - [output_opt] + [input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. - h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc] + - ['-h', h_dir, '-r', rc_dir] + [src]) - base, _ = os.path.splitext (os.path.basename (src)) - rc_file = os.path.join (rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc] + - ["/fo" + obj] + [rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError("Don't know how to compile %s to %s" - % (src, obj)) - - output_opt = "/Fo" + obj - try: - self.spawn([self.cc] + compile_opts + pp_opts + - [input_opt, output_opt] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - - def create_static_lib(self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, - output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? 
- try: - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - def link(self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - if not self.initialized: - self.initialize() - (objects, output_dir) = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - (libraries, library_dirs, runtime_library_dirs) = fixed_args - - if runtime_library_dirs: - self.warn ("I don't know what to do with 'runtime_library_dirs': " - + str (runtime_library_dirs)) - - lib_opts = gen_lib_options(self, - library_dirs, runtime_library_dirs, - libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - if target_desc == CCompiler.EXECUTABLE: - if debug: - ldflags = self.ldflags_shared_debug[1:] - else: - ldflags = self.ldflags_shared[1:] - else: - if debug: - ldflags = self.ldflags_shared_debug - else: - ldflags = self.ldflags_shared - - export_opts = [] - for sym in (export_symbols or []): - export_opts.append("/EXPORT:" + sym) - - ld_args = (ldflags + lib_opts + export_opts + - objects + ['/OUT:' + output_filename]) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. - if export_symbols is not None: - (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename)) - implib_file = os.path.join( - os.path.dirname(objects[0]), - self.library_filename(dll_name)) - ld_args.append ('/IMPLIB:' + implib_file) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - self.mkpath(os.path.dirname(output_filename)) - try: - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - - else: - log.debug("skipping %s (up-to-date)", output_filename) - - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. - - def library_dir_option(self, dir): - return "/LIBPATH:" + dir - - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC++") - - def library_option(self, lib): - return self.library_filename(lib) - - - def find_library_file(self, dirs, lib, debug=0): - # Prefer a debugging library if found (and requested), but deal - # with it if we don't have one. - if debug: - try_names = [lib + "_d", lib] - else: - try_names = [lib] - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename (name)) - if os.path.exists(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None - - # Helper methods for using the MSVC registry settings - - def find_exe(self, exe): - """Return path to an MSVC executable program. 
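
The search order that find_exe() (begun above, continuing below) implements is close to what shutil.which() provides today; a sketch under that assumption (my illustration, not the module's code):

    import os
    import shutil

    def find_tool(exe, extra_paths=()):
        for p in extra_paths:                    # registry-derived dirs first
            candidate = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(candidate):
                return candidate
        return shutil.which(exe) or exe          # PATH lookup, else the bare name
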
- - Tries to find the program in several places: first, one of the - MSVC program search paths from the registry; next, the directories - in the PATH environment variable. If any of those work, return an - absolute path that is known to exist. If none of them work, just - return the original program name, 'exe'. - """ - for p in self.__paths: - fn = os.path.join(os.path.abspath(p), exe) - if os.path.isfile(fn): - return fn - - # didn't find it; try existing path - for p in os.environ['Path'].split(';'): - fn = os.path.join(os.path.abspath(p),exe) - if os.path.isfile(fn): - return fn - - return exe - - def get_msvc_paths(self, path, platform='x86'): - """Get a list of devstudio directories (include, lib or path). - - Return a list of strings. The list will be empty if unable to - access the registry or appropriate registry keys not found. - """ - if not _can_read_reg: - return [] - - path = path + " dirs" - if self.__version >= 7: - key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories" - % (self.__root, self.__version)) - else: - key = (r"%s\6.0\Build System\Components\Platforms" - r"\Win32 (%s)\Directories" % (self.__root, platform)) - - for base in HKEYS: - d = read_values(base, key) - if d: - if self.__version >= 7: - return self.__macros.sub(d[path]).split(";") - else: - return d[path].split(";") - # MSVC 6 seems to create the registry entries we need only when - # the GUI is run. - if self.__version == 6: - for base in HKEYS: - if read_values(base, r"%s\6.0" % self.__root) is not None: - self.warn("It seems you have Visual Studio 6 installed, " - "but the expected registry settings are not present.\n" - "You must at least run the Visual Studio GUI once " - "so that these entries are created.") - break - return [] - - def set_path_env_var(self, name): - """Set environment variable 'name' to an MSVC path type value. - - This is equivalent to a SET command prior to execution of spawned - commands. 
- """ - - if name == "lib": - p = self.get_msvc_paths("library") - else: - p = self.get_msvc_paths(name) - if p: - os.environ[name] = ';'.join(p) - - -if get_build_version() >= 8.0: - log.debug("Importing new compiler from distutils.msvc9compiler") - OldMSVCCompiler = MSVCCompiler - from distutils.msvc9compiler import MSVCCompiler - # get_build_architecture not really relevant now we support cross-compile - from distutils.msvc9compiler import MacroExpander diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py35compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py35compat.py deleted file mode 100755 index 79b2e7f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py35compat.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys -import subprocess - - -def __optim_args_from_interpreter_flags(): - """Return a list of command-line arguments reproducing the current - optimization settings in sys.flags.""" - args = [] - value = sys.flags.optimize - if value > 0: - args.append("-" + "O" * value) - return args - - -_optim_args_from_interpreter_flags = getattr( - subprocess, - "_optim_args_from_interpreter_flags", - __optim_args_from_interpreter_flags, -) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py38compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py38compat.py deleted file mode 100755 index 7dbe8ce..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/py38compat.py +++ /dev/null @@ -1,7 +0,0 @@ -def aix_platform(osname, version, release): - try: - import _aix_support - return _aix_support.aix_platform() - except ImportError: - pass - return "%s-%s.%s" % (osname, version, release) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/spawn.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/spawn.py deleted file mode 100755 index b2d10e3..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/spawn.py +++ /dev/null @@ -1,106 +0,0 @@ -"""distutils.spawn - -Provides the 'spawn()' function, a front-end to various platform- -specific functions for launching another program in a sub-process. -Also provides the 'find_executable()' to search the path for a given -executable name. -""" - -import sys -import os -import subprocess - -from distutils.errors import DistutilsExecError -from distutils.debug import DEBUG -from distutils import log - - -def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None): - """Run another program, specified as a command list 'cmd', in a new process. - - 'cmd' is just the argument list for the new process, ie. - cmd[0] is the program to run and cmd[1:] are the rest of its arguments. - There is no way to run a program with a name different from that of its - executable. - - If 'search_path' is true (the default), the system's executable - search path will be used to find the program; otherwise, cmd[0] - must be the exact path to the executable. If 'dry_run' is true, - the command will not actually be run. - - Raise DistutilsExecError if running the program fails in any way; just - return on success. 
- """ - # cmd is documented as a list, but just in case some code passes a tuple - # in, protect our %-formatting code against horrible death - cmd = list(cmd) - - log.info(subprocess.list2cmdline(cmd)) - if dry_run: - return - - if search_path: - executable = find_executable(cmd[0]) - if executable is not None: - cmd[0] = executable - - env = env if env is not None else dict(os.environ) - - if sys.platform == 'darwin': - from distutils.util import MACOSX_VERSION_VAR, get_macosx_target_ver - macosx_target_ver = get_macosx_target_ver() - if macosx_target_ver: - env[MACOSX_VERSION_VAR] = macosx_target_ver - - try: - proc = subprocess.Popen(cmd, env=env) - proc.wait() - exitcode = proc.returncode - except OSError as exc: - if not DEBUG: - cmd = cmd[0] - raise DistutilsExecError( - "command %r failed: %s" % (cmd, exc.args[-1])) from exc - - if exitcode: - if not DEBUG: - cmd = cmd[0] - raise DistutilsExecError( - "command %r failed with exit code %s" % (cmd, exitcode)) - - -def find_executable(executable, path=None): - """Tries to find 'executable' in the directories listed in 'path'. - - A string listing directories separated by 'os.pathsep'; defaults to - os.environ['PATH']. Returns the complete filename or None if not found. - """ - _, ext = os.path.splitext(executable) - if (sys.platform == 'win32') and (ext != '.exe'): - executable = executable + '.exe' - - if os.path.isfile(executable): - return executable - - if path is None: - path = os.environ.get('PATH', None) - if path is None: - try: - path = os.confstr("CS_PATH") - except (AttributeError, ValueError): - # os.confstr() or CS_PATH is not available - path = os.defpath - # bpo-35755: Don't use os.defpath if the PATH environment variable is - # set to an empty string - - # PATH='' doesn't match, whereas PATH=':' looks in the current directory - if not path: - return None - - paths = path.split(os.pathsep) - for p in paths: - f = os.path.join(p, executable) - if os.path.isfile(f): - # the file exists, we have a shot at spawn working - return f - return None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/sysconfig.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/sysconfig.py deleted file mode 100755 index 4a77a43..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/sysconfig.py +++ /dev/null @@ -1,567 +0,0 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -import _imp -import os -import re -import sys -import sysconfig - -from .errors import DistutilsPlatformError - -IS_PYPY = '__pypy__' in sys.builtin_module_names - -# These are needed in a couple of spots, so just compute them once. -PREFIX = os.path.normpath(sys.prefix) -EXEC_PREFIX = os.path.normpath(sys.exec_prefix) -BASE_PREFIX = os.path.normpath(sys.base_prefix) -BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) - -# Path to the base directory of the project. On Windows the binary may -# live in project/PCbuild/win32 or project/PCbuild/amd64. 
-# set for cross builds -if "_PYTHON_PROJECT_BASE" in os.environ: - project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"]) -else: - if sys.executable: - project_base = os.path.dirname(os.path.abspath(sys.executable)) - else: - # sys.executable can be empty if argv[0] has been changed and Python is - # unable to retrieve the real program name - project_base = os.getcwd() - - -# python_build: (Boolean) if true, we're either building Python or -# building an extension with an un-installed Python, so we use -# different (hard-wired) directories. -def _is_python_source_dir(d): - for fn in ("Setup", "Setup.local"): - if os.path.isfile(os.path.join(d, "Modules", fn)): - return True - return False - -_sys_home = getattr(sys, '_home', None) - -if os.name == 'nt': - def _fix_pcbuild(d): - if d and os.path.normcase(d).startswith( - os.path.normcase(os.path.join(PREFIX, "PCbuild"))): - return PREFIX - return d - project_base = _fix_pcbuild(project_base) - _sys_home = _fix_pcbuild(_sys_home) - -def _python_build(): - if _sys_home: - return _is_python_source_dir(_sys_home) - return _is_python_source_dir(project_base) - -python_build = _python_build() - - -# Calculate the build qualifier flags if they are defined. Adding the flags -# to the include and lib directories only makes sense for an installation, not -# an in-source build. -build_flags = '' -try: - if not python_build: - build_flags = sys.abiflags -except AttributeError: - # It's not a configure-based build, so the sys module doesn't have - # this attribute, which is fine. - pass - -def get_python_version(): - """Return a string containing the major and minor Python version, - leaving off the patchlevel. Sample return values could be '1.5' - or '2.2'. - """ - return '%d.%d' % sys.version_info[:2] - - -def get_python_inc(plat_specific=0, prefix=None): - """Return the directory containing installed Python header files. - - If 'plat_specific' is false (the default), this is the path to the - non-platform-specific header files, i.e. Python.h and so on; - otherwise, this is the path to platform-specific header files - (namely pyconfig.h). - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. - """ - if prefix is None: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - if os.name == "posix": - if IS_PYPY and sys.version_info < (3, 8): - return os.path.join(prefix, 'include') - if python_build: - # Assume the executable is in the build directory. The - # pyconfig.h file should be in the same directory. Since - # the build directory may not be the source directory, we - # must use "srcdir" from the makefile to find the "Include" - # directory. - if plat_specific: - return _sys_home or project_base - else: - incdir = os.path.join(get_config_var('srcdir'), 'Include') - return os.path.normpath(incdir) - implementation = 'pypy' if IS_PYPY else 'python' - python_dir = implementation + get_python_version() + build_flags - return os.path.join(prefix, "include", python_dir) - elif os.name == "nt": - if python_build: - # Include both the include and PC dir to ensure we can find - # pyconfig.h - return (os.path.join(prefix, "include") + os.path.pathsep + - os.path.join(prefix, "PC")) - return os.path.join(prefix, "include") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its C header files " - "on platform '%s'" % os.name) - - -# allow this behavior to be monkey-patched. Ref pypa/distutils#2. 
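
From the caller's side, the helpers in this file answer "where do headers and libraries live?"; on modern interpreters the stdlib sysconfig module gives the same answers. An orientation-only sketch (_posix_lib(), which the monkey-patch note above refers to, follows next):

    import sysconfig

    print(sysconfig.get_path("include"))   # ~ get_python_inc()
    print(sysconfig.get_path("platlib"))   # ~ get_python_lib(plat_specific=1)
    print(sysconfig.get_path("purelib"))   # ~ get_python_lib()
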
-def _posix_lib(standard_lib, libpython, early_prefix, prefix): - if standard_lib: - return libpython - else: - return os.path.join(libpython, "site-packages") - - -def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): - """Return the directory containing the Python library (standard or - site additions). - - If 'plat_specific' is true, return the directory containing - platform-specific modules, i.e. any module from a non-pure-Python - module distribution; otherwise, return the platform-shared library - directory. If 'standard_lib' is true, return the directory - containing standard Python library modules; otherwise, return the - directory for site-specific modules. - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. - """ - - if IS_PYPY and sys.version_info < (3, 8): - # PyPy-specific schema - if prefix is None: - prefix = PREFIX - if standard_lib: - return os.path.join(prefix, "lib-python", sys.version[0]) - return os.path.join(prefix, 'site-packages') - - early_prefix = prefix - - if prefix is None: - if standard_lib: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - else: - prefix = plat_specific and EXEC_PREFIX or PREFIX - - if os.name == "posix": - if plat_specific or standard_lib: - # Platform-specific modules (any module from a non-pure-Python - # module distribution) or standard Python library modules. - libdir = getattr(sys, "platlibdir", "lib") - else: - # Pure Python - libdir = "lib" - implementation = 'pypy' if IS_PYPY else 'python' - libpython = os.path.join(prefix, libdir, - implementation + get_python_version()) - return _posix_lib(standard_lib, libpython, early_prefix, prefix) - elif os.name == "nt": - if standard_lib: - return os.path.join(prefix, "Lib") - else: - return os.path.join(prefix, "Lib", "site-packages") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its library " - "on platform '%s'" % os.name) - - - -def customize_compiler(compiler): - """Do any platform-specific customization of a CCompiler instance. - - Mainly needed on Unix, so we can plug in the information that - varies across Unices and is stored in Python's Makefile. - """ - if compiler.compiler_type == "unix": - if sys.platform == "darwin": - # Perform first-time customization of compiler-related - # config vars on OS X now that we know we need a compiler. - # This is primarily to support Pythons from binary - # installers. The kind and paths to build tools on - # the user system may vary significantly from the system - # that Python itself was built on. Also the user OS - # version and build tools may not support the same set - # of CPU architectures for universal builds. - global _config_vars - # Use get_config_var() to ensure _config_vars is initialized. 
- if not get_config_var('CUSTOMIZED_OSX_COMPILER'): - import _osx_support - _osx_support.customize_compiler(_config_vars) - _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - - (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ - get_config_vars('CC', 'CXX', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') - - if 'CC' in os.environ: - newcc = os.environ['CC'] - if('LDSHARED' not in os.environ - and ldshared.startswith(cc)): - # If CC is overridden, use that as the default - # command for LDSHARED as well - ldshared = newcc + ldshared[len(cc):] - cc = newcc - if 'CXX' in os.environ: - cxx = os.environ['CXX'] - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = cflags + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - if 'AR' in os.environ: - ar = os.environ['AR'] - if 'ARFLAGS' in os.environ: - archiver = ar + ' ' + os.environ['ARFLAGS'] - else: - archiver = ar + ' ' + ar_flags - - cc_cmd = cc + ' ' + cflags - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - compiler_cxx=cxx, - linker_so=ldshared, - linker_exe=cc, - archiver=archiver) - - if 'RANLIB' in os.environ and compiler.executables.get('ranlib', None): - compiler.set_executables(ranlib=os.environ['RANLIB']) - - compiler.shared_lib_extension = shlib_suffix - - -def get_config_h_filename(): - """Return full pathname of installed pyconfig.h file.""" - if python_build: - if os.name == "nt": - inc_dir = os.path.join(_sys_home or project_base, "PC") - else: - inc_dir = _sys_home or project_base - return os.path.join(inc_dir, 'pyconfig.h') - else: - return sysconfig.get_config_h_filename() - - - -def get_makefile_filename(): - """Return full pathname of installed Makefile from the Python build.""" - return sysconfig.get_makefile_filename() - - -def parse_config_h(fp, g=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - return sysconfig.parse_config_h(fp, vars=g) - - -# Regexes needed for parsing Makefile (and similar syntaxes, -# like old-style Setup files). -_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") -_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") -_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - -def parse_makefile(fn, g=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
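
The three regexes above drive parse_makefile(); one line is enough to see the split between a variable definition and the $(...) references that still need expansion (self-contained illustration, mine):

    import re

    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")

    m = _variable_rx.match("CFLAGS = $(BASECFLAGS) -O2")
    name, value = m.group(1, 2)           # 'CFLAGS', '$(BASECFLAGS) -O2'
    ref = _findvar1_rx.search(value)      # the reference parse_makefile defers
    print(name, ref.group(1))             # -> CFLAGS BASECFLAGS
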
- """ - from distutils.text_file import TextFile - fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") - - if g is None: - g = {} - done = {} - notdone = {} - - while True: - line = fp.readline() - if line is None: # eof - break - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. - renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - # do variable interpolation here - while notdone: - for name in list(notdone): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if name.startswith('PY_') and name[3:] in renamed_variables: - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - else: - done[n] = item = "" - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - del notdone[name] - - if name.startswith('PY_') \ - and name[3:] in renamed_variables: - - name = name[3:] - if name not in done: - done[name] = value - else: - # bogus variable reference; just drop it since we can't deal - del notdone[name] - - fp.close() - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - g.update(done) - return g - - -def expand_makefile_vars(s, vars): - """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in - 'string' according to 'vars' (a dictionary mapping variable names to - values). Variables not present in 'vars' are silently expanded to the - empty string. The variable values in 'vars' should not contain further - variable expansions; if 'vars' is the output of 'parse_makefile()', - you're fine. Returns a variable-expanded version of 's'. - """ - - # This algorithm does multiple expansion, so if vars['foo'] contains - # "${bar}", it will expand ${foo} to ${bar}, and then expand - # ${bar}... and so forth. This is fine as long as 'vars' comes from - # 'parse_makefile()', which takes care of such expansions eagerly, - # according to make's variable expansion semantics. 
- - while True: - m = _findvar1_rx.search(s) or _findvar2_rx.search(s) - if m: - (beg, end) = m.span() - s = s[0:beg] + vars.get(m.group(1)) + s[end:] - else: - break - return s - - -_config_vars = None - - -_sysconfig_name_tmpl = '_sysconfigdata_{abi}_{platform}_{multiarch}' - - -def _init_posix(): - """Initialize the module as appropriate for POSIX systems.""" - # _sysconfigdata is generated at build time, see the sysconfig module - name = os.environ.get( - '_PYTHON_SYSCONFIGDATA_NAME', - _sysconfig_name_tmpl.format( - abi=sys.abiflags, - platform=sys.platform, - multiarch=getattr(sys.implementation, '_multiarch', ''), - ), - ) - try: - _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0) - except ImportError: - # Python 3.5 and pypy 7.3.1 - _temp = __import__( - '_sysconfigdata', globals(), locals(), ['build_time_vars'], 0) - build_time_vars = _temp.build_time_vars - global _config_vars - _config_vars = {} - _config_vars.update(build_time_vars) - - -def _init_nt(): - """Initialize the module as appropriate for NT""" - g = {} - # set basic install directories - g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) - g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) - - # XXX hmmm.. a normal install puts include files here - g['INCLUDEPY'] = get_python_inc(plat_specific=0) - - g['EXT_SUFFIX'] = _imp.extension_suffixes()[0] - g['EXE'] = ".exe" - g['VERSION'] = get_python_version().replace(".", "") - g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) - - global _config_vars - _config_vars = g - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. Generally this includes - everything needed to build extensions and install both pure modules and - extensions. On Unix, this means every variable defined in Python's - installed Makefile; on Windows it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _config_vars - if _config_vars is None: - func = globals().get("_init_" + os.name) - if func: - func() - else: - _config_vars = {} - - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # Distutils. - _config_vars['prefix'] = PREFIX - _config_vars['exec_prefix'] = EXEC_PREFIX - - if not IS_PYPY: - # For backward compatibility, see issue19555 - SO = _config_vars.get('EXT_SUFFIX') - if SO is not None: - _config_vars['SO'] = SO - - # Always convert srcdir to an absolute path - srcdir = _config_vars.get('srcdir', project_base) - if os.name == 'posix': - if python_build: - # If srcdir is a relative path (typically '.' or '..') - # then it should be interpreted relative to the directory - # containing Makefile. - base = os.path.dirname(get_makefile_filename()) - srcdir = os.path.join(base, srcdir) - else: - # srcdir is not meaningful since the installation is - # spread about the filesystem. We choose the - # directory containing the Makefile since we know it - # exists. - srcdir = os.path.dirname(get_makefile_filename()) - _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) - - # Convert srcdir into an absolute path if it appears necessary. - # Normally it is relative to the build directory. However, during - # testing, for example, we might be running a non-installed python - # from a different directory. 
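# [Editor's note -- typical read-only use of this API, for illustration:
#      cc, ext = get_config_vars("CC", "EXT_SUFFIX")
#      all_vars = get_config_vars()  # full dict, cached after first call
#  the _init_<os.name> dispatch above runs only on the first call.]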
- if python_build and os.name == "posix": - base = project_base - if (not os.path.isabs(_config_vars['srcdir']) and - base != os.getcwd()): - # srcdir is relative and we are not in the same directory - # as the executable. Assume executable is in the build - # directory and make srcdir absolute. - srcdir = os.path.join(base, _config_vars['srcdir']) - _config_vars['srcdir'] = os.path.normpath(srcdir) - - # OS X platforms require special customization to handle - # multi-architecture, multi-os-version installers - if sys.platform == 'darwin': - import _osx_support - _osx_support.customize_config_vars(_config_vars) - - if args: - vals = [] - for name in args: - vals.append(_config_vars.get(name)) - return vals - else: - return _config_vars - -def get_config_var(name): - """Return the value of a single variable using the dictionary - returned by 'get_config_vars()'. Equivalent to - get_config_vars().get(name) - """ - if name == 'SO': - import warnings - warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2) - return get_config_vars().get(name) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/text_file.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/text_file.py deleted file mode 100755 index 93abad3..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/text_file.py +++ /dev/null @@ -1,286 +0,0 @@ -"""text_file - -provides the TextFile class, which gives an interface to text files -that (optionally) takes care of stripping comments, ignoring blank -lines, and joining lines with backslashes.""" - -import sys, io - - -class TextFile: - """Provides a file-like object that takes care of all the things you - commonly want to do when processing a text file that has some - line-by-line syntax: strip comments (as long as "#" is your - comment character), skip blank lines, join adjacent lines by - escaping the newline (ie. backslash at end of line), strip - leading and/or trailing whitespace. All of these are optional - and independently controllable. - - Provides a 'warn()' method so you can generate warning messages that - report physical line number, even if the logical line in question - spans multiple physical lines. Also provides 'unreadline()' for - implementing line-at-a-time lookahead. - - Constructor is called as: - - TextFile (filename=None, file=None, **options) - - It bombs (RuntimeError) if both 'filename' and 'file' are None; - 'filename' should be a string, and 'file' a file object (or - something that provides 'readline()' and 'close()' methods). It is - recommended that you supply at least 'filename', so that TextFile - can include it in warning messages. If 'file' is not supplied, - TextFile creates its own using 'io.open()'. - - The options are all boolean, and affect the value returned by - 'readline()': - strip_comments [default: true] - strip from "#" to end-of-line, as well as any whitespace - leading up to the "#" -- unless it is escaped by a backslash - lstrip_ws [default: false] - strip leading whitespace from each line before returning it - rstrip_ws [default: true] - strip trailing whitespace (including line terminator!) from - each line before returning it - skip_blanks [default: true} - skip lines that are empty *after* stripping comments and - whitespace. (If both lstrip_ws and rstrip_ws are false, - then some lines may consist of solely whitespace: these will - *not* be skipped, even if 'skip_blanks' is true.) 
- join_lines [default: false] - if a backslash is the last non-newline character on a line - after stripping comments and whitespace, join the following line - to it to form one "logical line"; if N consecutive lines end - with a backslash, then N+1 physical lines will be joined to - form one logical line. - collapse_join [default: false] - strip leading whitespace from lines that are joined to their - predecessor; only matters if (join_lines and not lstrip_ws) - errors [default: 'strict'] - error handler used to decode the file content - - Note that since 'rstrip_ws' can strip the trailing newline, the - semantics of 'readline()' must differ from those of the builtin file - object's 'readline()' method! In particular, 'readline()' returns - None for end-of-file: an empty string might just be a blank line (or - an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is - not.""" - - default_options = { 'strip_comments': 1, - 'skip_blanks': 1, - 'lstrip_ws': 0, - 'rstrip_ws': 1, - 'join_lines': 0, - 'collapse_join': 0, - 'errors': 'strict', - } - - def __init__(self, filename=None, file=None, **options): - """Construct a new TextFile object. At least one of 'filename' - (a string) and 'file' (a file-like object) must be supplied. - They keyword argument options are described above and affect - the values returned by 'readline()'.""" - if filename is None and file is None: - raise RuntimeError("you must supply either or both of 'filename' and 'file'") - - # set values for all options -- either from client option hash - # or fallback to default_options - for opt in self.default_options.keys(): - if opt in options: - setattr(self, opt, options[opt]) - else: - setattr(self, opt, self.default_options[opt]) - - # sanity check client option hash - for opt in options.keys(): - if opt not in self.default_options: - raise KeyError("invalid TextFile option '%s'" % opt) - - if file is None: - self.open(filename) - else: - self.filename = filename - self.file = file - self.current_line = 0 # assuming that file is at BOF! - - # 'linebuf' is a stack of lines that will be emptied before we - # actually read from the file; it's only populated by an - # 'unreadline()' operation - self.linebuf = [] - - def open(self, filename): - """Open a new file named 'filename'. This overrides both the - 'filename' and 'file' arguments to the constructor.""" - self.filename = filename - self.file = io.open(self.filename, 'r', errors=self.errors) - self.current_line = 0 - - def close(self): - """Close the current file and forget everything we know about it - (filename, current line number).""" - file = self.file - self.file = None - self.filename = None - self.current_line = None - file.close() - - def gen_error(self, msg, line=None): - outmsg = [] - if line is None: - line = self.current_line - outmsg.append(self.filename + ", ") - if isinstance(line, (list, tuple)): - outmsg.append("lines %d-%d: " % tuple(line)) - else: - outmsg.append("line %d: " % line) - outmsg.append(str(msg)) - return "".join(outmsg) - - def error(self, msg, line=None): - raise ValueError("error: " + self.gen_error(msg, line)) - - def warn(self, msg, line=None): - """Print (to stderr) a warning message tied to the current logical - line in the current file. If the current logical line in the - file spans multiple physical lines, the warning refers to the - whole range, eg. "lines 3-5". 
If 'line' supplied, it overrides - the current line number; it may be a list or tuple to indicate a - range of physical lines, or an integer for a single physical - line.""" - sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n") - - def readline(self): - """Read and return a single logical line from the current file (or - from an internal buffer if lines have previously been "unread" - with 'unreadline()'). If the 'join_lines' option is true, this - may involve reading multiple physical lines concatenated into a - single string. Updates the current line number, so calling - 'warn()' after 'readline()' emits a warning about the physical - line(s) just read. Returns None on end-of-file, since the empty - string can occur if 'rstrip_ws' is true but 'strip_blanks' is - not.""" - # If any "unread" lines waiting in 'linebuf', return the top - # one. (We don't actually buffer read-ahead data -- lines only - # get put in 'linebuf' if the client explicitly does an - # 'unreadline()'. - if self.linebuf: - line = self.linebuf[-1] - del self.linebuf[-1] - return line - - buildup_line = '' - - while True: - # read the line, make it None if EOF - line = self.file.readline() - if line == '': - line = None - - if self.strip_comments and line: - - # Look for the first "#" in the line. If none, never - # mind. If we find one and it's the first character, or - # is not preceded by "\", then it starts a comment -- - # strip the comment, strip whitespace before it, and - # carry on. Otherwise, it's just an escaped "#", so - # unescape it (and any other escaped "#"'s that might be - # lurking in there) and otherwise leave the line alone. - - pos = line.find("#") - if pos == -1: # no "#" -- no comments - pass - - # It's definitely a comment -- either "#" is the first - # character, or it's elsewhere and unescaped. - elif pos == 0 or line[pos-1] != "\\": - # Have to preserve the trailing newline, because it's - # the job of a later step (rstrip_ws) to remove it -- - # and if rstrip_ws is false, we'd better preserve it! - # (NB. this means that if the final line is all comment - # and has no trailing newline, we will think that it's - # EOF; I think that's OK.) - eol = (line[-1] == '\n') and '\n' or '' - line = line[0:pos] + eol - - # If all that's left is whitespace, then skip line - # *now*, before we try to join it to 'buildup_line' -- - # that way constructs like - # hello \\ - # # comment that should be ignored - # there - # result in "hello there". - if line.strip() == "": - continue - else: # it's an escaped "#" - line = line.replace("\\#", "#") - - # did previous line end with a backslash? then accumulate - if self.join_lines and buildup_line: - # oops: end of file - if line is None: - self.warn("continuation line immediately precedes " - "end-of-file") - return buildup_line - - if self.collapse_join: - line = line.lstrip() - line = buildup_line + line - - # careful: pay attention to line number when incrementing it - if isinstance(self.current_line, list): - self.current_line[1] = self.current_line[1] + 1 - else: - self.current_line = [self.current_line, - self.current_line + 1] - # just an ordinary line, read it as usual - else: - if line is None: # eof - return None - - # still have to be careful about incrementing the line number! 
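# [Editor's note, illustrative: when physical lines 3-5 are joined,
#  'current_line' passes through 3 -> [3, 4] -> [3, 5], and a later
#  warn() reports "lines 3-5"; for ordinary reads it stays an int.]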
- if isinstance(self.current_line, list): - self.current_line = self.current_line[1] + 1 - else: - self.current_line = self.current_line + 1 - - # strip whitespace however the client wants (leading and - # trailing, or one or the other, or neither) - if self.lstrip_ws and self.rstrip_ws: - line = line.strip() - elif self.lstrip_ws: - line = line.lstrip() - elif self.rstrip_ws: - line = line.rstrip() - - # blank line (whether we rstrip'ed or not)? skip to next line - # if appropriate - if (line == '' or line == '\n') and self.skip_blanks: - continue - - if self.join_lines: - if line[-1] == '\\': - buildup_line = line[:-1] - continue - - if line[-2:] == '\\\n': - buildup_line = line[0:-2] + '\n' - continue - - # well, I guess there's some actual content there: return it - return line - - def readlines(self): - """Read and return the list of all logical lines remaining in the - current file.""" - lines = [] - while True: - line = self.readline() - if line is None: - return lines - lines.append(line) - - def unreadline(self, line): - """Push 'line' (a string) onto an internal buffer that will be - checked by future 'readline()' calls. Handy for implementing - a parser with line-at-a-time lookahead.""" - self.linebuf.append(line) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/unixccompiler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/unixccompiler.py deleted file mode 100755 index a07e598..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/unixccompiler.py +++ /dev/null @@ -1,325 +0,0 @@ -"""distutils.unixccompiler - -Contains the UnixCCompiler class, a subclass of CCompiler that handles -the "typical" Unix-style command-line C compiler: - * macros defined with -Dname[=value] - * macros undefined with -Uname - * include search directories specified with -Idir - * libraries specified with -lllib - * library search directories specified with -Ldir - * compile handled by 'cc' (or similar) executable with -c option: - compiles .c to .o - * link static library handled by 'ar' command (possibly with 'ranlib') - * link shared library handled by 'cc -shared' -""" - -import os, sys, re, shlex - -from distutils import sysconfig -from distutils.dep_util import newer -from distutils.ccompiler import \ - CCompiler, gen_preprocess_options, gen_lib_options -from distutils.errors import \ - DistutilsExecError, CompileError, LibError, LinkError -from distutils import log - -if sys.platform == 'darwin': - import _osx_support - -# XXX Things not currently handled: -# * optimization/debug/warning flags; we just use whatever's in Python's -# Makefile and live with it. Is this adequate? If not, we might -# have to have a bunch of subclasses GNUCCompiler, SGICCompiler, -# SunCCompiler, and I suspect down that road lies madness. -# * even if we don't know a warning flag from an optimization flag, -# we need some way for outsiders to feed preprocessor/compiler/linker -# flags in to us -- eg. a sysadmin might want to mandate certain flags -# via a site config file, or a user might want to set something for -# compiling this module distribution only via the setup.py command -# line, whatever. As long as these options come from something on the -# current system, they can be as system-dependent as they like, and we -# should just happily stuff them into the preprocessor/compiler/linker -# options and carry on. 
- - -class UnixCCompiler(CCompiler): - - compiler_type = 'unix' - - # These are used by CCompiler in two places: the constructor sets - # instance attributes 'preprocessor', 'compiler', etc. from them, and - # 'set_executable()' allows any of these to be set. The defaults here - # are pretty generic; they will probably have to be set by an outsider - # (eg. using information discovered by the sysconfig about building - # Python extensions). - executables = {'preprocessor' : None, - 'compiler' : ["cc"], - 'compiler_so' : ["cc"], - 'compiler_cxx' : ["cc"], - 'linker_so' : ["cc", "-shared"], - 'linker_exe' : ["cc"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None, - } - - if sys.platform[:6] == "darwin": - executables['ranlib'] = ["ranlib"] - - # Needed for the filename generation methods provided by the base - # class, CCompiler. NB. whoever instantiates/uses a particular - # UnixCCompiler instance should set 'shared_lib_ext' -- we set a - # reasonable common default here, but it's not necessarily used on all - # Unices! - - src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"] - obj_extension = ".o" - static_lib_extension = ".a" - shared_lib_extension = ".so" - dylib_lib_extension = ".dylib" - xcode_stub_lib_extension = ".tbd" - static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s" - xcode_stub_lib_format = dylib_lib_format - if sys.platform == "cygwin": - exe_extension = ".exe" - - def preprocess(self, source, output_file=None, macros=None, - include_dirs=None, extra_preargs=None, extra_postargs=None): - fixed_args = self._fix_compile_args(None, macros, include_dirs) - ignore, macros, include_dirs = fixed_args - pp_opts = gen_preprocess_options(macros, include_dirs) - pp_args = self.preprocessor + pp_opts - if output_file: - pp_args.extend(['-o', output_file]) - if extra_preargs: - pp_args[:0] = extra_preargs - if extra_postargs: - pp_args.extend(extra_postargs) - pp_args.append(source) - - # We need to preprocess: either we're being forced to, or we're - # generating output to stdout, or there's a target output file and - # the source file is newer than the target (or the target doesn't - # exist). - if self.force or output_file is None or newer(source, output_file): - if output_file: - self.mkpath(os.path.dirname(output_file)) - try: - self.spawn(pp_args) - except DistutilsExecError as msg: - raise CompileError(msg) - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - compiler_so = self.compiler_so - if sys.platform == 'darwin': - compiler_so = _osx_support.compiler_fixup(compiler_so, - cc_args + extra_postargs) - try: - self.spawn(compiler_so + cc_args + [src, '-o', obj] + - extra_postargs) - except DistutilsExecError as msg: - raise CompileError(msg) - - def create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - self.mkpath(os.path.dirname(output_filename)) - self.spawn(self.archiver + - [output_filename] + - objects + self.objects) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. 
- if self.ranlib: - try: - self.spawn(self.ranlib + [output_filename]) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, - runtime_library_dirs) - libraries, library_dirs, runtime_library_dirs = fixed_args - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if not isinstance(output_dir, (str, type(None))): - raise TypeError("'output_dir' must be a string or None") - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - ld_args = (objects + self.objects + - lib_opts + ['-o', output_filename]) - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - try: - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - if target_lang == "c++" and self.compiler_cxx: - # skip over environment variable settings if /usr/bin/env - # is used to set up the linker's environment. - # This is needed on OSX. Note: this assumes that the - # normal and C++ compiler have the same environment - # settings. - i = 0 - if os.path.basename(linker[0]) == "env": - i = 1 - while '=' in linker[i]: - i += 1 - - if os.path.basename(linker[i]) == 'ld_so_aix': - # AIX platforms prefix the compiler with the ld_so_aix - # script, so we need to adjust our linker index - offset = 1 - else: - offset = 0 - - linker[i+offset] = self.compiler_cxx[i] - - if sys.platform == 'darwin': - linker = _osx_support.compiler_fixup(linker, ld_args) - - self.spawn(linker + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. - - def library_dir_option(self, dir): - return "-L" + dir - - def _is_gcc(self, compiler_name): - return "gcc" in compiler_name or "g++" in compiler_name - - def runtime_library_dir_option(self, dir): - # XXX Hackish, at the very least. See Python bug #445902: - # http://sourceforge.net/tracker/index.php - # ?func=detail&aid=445902&group_id=5470&atid=105470 - # Linkers on different platforms need different options to - # specify that directories need to be added to the list of - # directories searched for dependencies when a dynamic library - # is sought. GCC on GNU systems (Linux, FreeBSD, ...) has to - # be told to pass the -R option through to the linker, whereas - # other compilers and gcc on other systems just know this. - # Other compilers may need something slightly different. At - # this time, there's no way to determine this information from - # the configuration data stored in the Python installation, so - # we use this hack. 
- compiler = os.path.basename(shlex.split(sysconfig.get_config_var("CC"))[0]) - if sys.platform[:6] == "darwin": - from distutils.util import get_macosx_target_ver, split_version - macosx_target_ver = get_macosx_target_ver() - if macosx_target_ver and split_version(macosx_target_ver) >= [10, 5]: - return "-Wl,-rpath," + dir - else: # no support for -rpath on earlier macOS versions - return "-L" + dir - elif sys.platform[:7] == "freebsd": - return "-Wl,-rpath=" + dir - elif sys.platform[:5] == "hp-ux": - if self._is_gcc(compiler): - return ["-Wl,+s", "-L" + dir] - return ["+s", "-L" + dir] - - # For all compilers, `-Wl` is the presumed way to - # pass a compiler option to the linker and `-R` is - # the way to pass an RPATH. - if sysconfig.get_config_var("GNULD") == "yes": - # GNU ld needs an extra option to get a RUNPATH - # instead of just an RPATH. - return "-Wl,--enable-new-dtags,-R" + dir - else: - return "-Wl,-R" + dir - - def library_option(self, lib): - return "-l" + lib - - def find_library_file(self, dirs, lib, debug=0): - shared_f = self.library_filename(lib, lib_type='shared') - dylib_f = self.library_filename(lib, lib_type='dylib') - xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub') - static_f = self.library_filename(lib, lib_type='static') - - if sys.platform == 'darwin': - # On OSX users can specify an alternate SDK using - # '-isysroot', calculate the SDK root if it is specified - # (and use it further on) - # - # Note that, as of Xcode 7, Apple SDKs may contain textual stub - # libraries with .tbd extensions rather than the normal .dylib - # shared libraries installed in /. The Apple compiler tool - # chain handles this transparently but it can cause problems - # for programs that are being built with an SDK and searching - # for specific libraries. Callers of find_library_file need to - # keep in mind that the base filename of the returned SDK library - # file might have a different extension from that of the library - # file installed on the running system, for example: - # /Applications/Xcode.app/Contents/Developer/Platforms/ - # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/ - # usr/lib/libedit.tbd - # vs - # /usr/lib/libedit.dylib - cflags = sysconfig.get_config_var('CFLAGS') - m = re.search(r'-isysroot\s*(\S+)', cflags) - if m is None: - sysroot = '/' - else: - sysroot = m.group(1) - - - - for dir in dirs: - shared = os.path.join(dir, shared_f) - dylib = os.path.join(dir, dylib_f) - static = os.path.join(dir, static_f) - xcode_stub = os.path.join(dir, xcode_stub_f) - - if sys.platform == 'darwin' and ( - dir.startswith('/System/') or ( - dir.startswith('/usr/') and not dir.startswith('/usr/local/'))): - - shared = os.path.join(sysroot, dir[1:], shared_f) - dylib = os.path.join(sysroot, dir[1:], dylib_f) - static = os.path.join(sysroot, dir[1:], static_f) - xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f) - - # We're second-guessing the linker here, with not much hard - # data to go on: GCC seems to prefer the shared library, so I'm - # assuming that *all* Unix C compilers do. And of course I'm - # ignoring even GCC's "-static" option. So sue me. 
- if os.path.exists(dylib): - return dylib - elif os.path.exists(xcode_stub): - return xcode_stub - elif os.path.exists(shared): - return shared - elif os.path.exists(static): - return static - - # Oops, didn't find it in *any* of 'dirs' - return None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/util.py deleted file mode 100755 index 6d506d7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/util.py +++ /dev/null @@ -1,496 +0,0 @@ -"""distutils.util - -Miscellaneous utility functions -- anything that doesn't fit into -one of the other *util.py modules. -""" - -import os -import re -import importlib.util -import string -import sys -import sysconfig -from distutils.errors import DistutilsPlatformError -from distutils.dep_util import newer -from distutils.spawn import spawn -from distutils import log -from distutils.errors import DistutilsByteCompileError -from .py35compat import _optim_args_from_interpreter_flags - - -def get_host_platform(): - """Return a string that identifies the current platform. This is used mainly to - distinguish platform-specific build directories and platform-specific built - distributions. - """ - - # We initially exposed platforms as defined in Python 3.9 - # even with older Python versions when distutils was split out. - # Now that we delegate to stdlib sysconfig we need to restore this - # in case anyone has started to depend on it. - - if sys.version_info < (3, 8): - if os.name == 'nt': - if '(arm)' in sys.version.lower(): - return 'win-arm32' - if '(arm64)' in sys.version.lower(): - return 'win-arm64' - - if sys.version_info < (3, 9): - if os.name == "posix" and hasattr(os, 'uname'): - osname, host, release, version, machine = os.uname() - if osname[:3] == "aix": - from .py38compat import aix_platform - return aix_platform(osname, version, release) - - return sysconfig.get_platform() - -def get_platform(): - if os.name == 'nt': - TARGET_TO_PLAT = { - 'x86' : 'win32', - 'x64' : 'win-amd64', - 'arm' : 'win-arm32', - 'arm64': 'win-arm64', - } - return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform() - else: - return get_host_platform() - - -if sys.platform == 'darwin': - _syscfg_macosx_ver = None # cache the version pulled from sysconfig -MACOSX_VERSION_VAR = 'MACOSX_DEPLOYMENT_TARGET' - -def _clear_cached_macosx_ver(): - """For testing only. Do not call.""" - global _syscfg_macosx_ver - _syscfg_macosx_ver = None - -def get_macosx_target_ver_from_syscfg(): - """Get the version of macOS latched in the Python interpreter configuration. - Returns the version as a string or None if can't obtain one. Cached.""" - global _syscfg_macosx_ver - if _syscfg_macosx_ver is None: - from distutils import sysconfig - ver = sysconfig.get_config_var(MACOSX_VERSION_VAR) or '' - if ver: - _syscfg_macosx_ver = ver - return _syscfg_macosx_ver - -def get_macosx_target_ver(): - """Return the version of macOS for which we are building. - - The target version defaults to the version in sysconfig latched at time - the Python interpreter was built, unless overridden by an environment - variable. If neither source has a value, then None is returned""" - - syscfg_ver = get_macosx_target_ver_from_syscfg() - env_ver = os.environ.get(MACOSX_VERSION_VAR) - - if env_ver: - # Validate overridden version against sysconfig version, if have both. 
- # Ensure that the deployment target of the build process is not less - # than 10.3 if the interpreter was built for 10.3 or later. This - # ensures extension modules are built with correct compatibility - # values, specifically LDSHARED which can use - # '-undefined dynamic_lookup' which only works on >= 10.3. - if syscfg_ver and split_version(syscfg_ver) >= [10, 3] and \ - split_version(env_ver) < [10, 3]: - my_msg = ('$' + MACOSX_VERSION_VAR + ' mismatch: ' - 'now "%s" but "%s" during configure; ' - 'must use 10.3 or later' - % (env_ver, syscfg_ver)) - raise DistutilsPlatformError(my_msg) - return env_ver - return syscfg_ver - - -def split_version(s): - """Convert a dot-separated string into a list of numbers for comparisons""" - return [int(n) for n in s.split('.')] - - -def convert_path (pathname): - """Return 'pathname' as a name that will work on the native filesystem, - i.e. split it on '/' and put it back together again using the current - directory separator. Needed because filenames in the setup script are - always supplied in Unix style, and have to be converted to the local - convention before we can actually use them in the filesystem. Raises - ValueError on non-Unix-ish systems if 'pathname' either starts or - ends with a slash. - """ - if os.sep == '/': - return pathname - if not pathname: - return pathname - if pathname[0] == '/': - raise ValueError("path '%s' cannot be absolute" % pathname) - if pathname[-1] == '/': - raise ValueError("path '%s' cannot end with '/'" % pathname) - - paths = pathname.split('/') - while '.' in paths: - paths.remove('.') - if not paths: - return os.curdir - return os.path.join(*paths) - -# convert_path () - - -def change_root (new_root, pathname): - """Return 'pathname' with 'new_root' prepended. If 'pathname' is - relative, this is equivalent to "os.path.join(new_root,pathname)". - Otherwise, it requires making 'pathname' relative and then joining the - two, which is tricky on DOS/Windows and Mac OS. - """ - if os.name == 'posix': - if not os.path.isabs(pathname): - return os.path.join(new_root, pathname) - else: - return os.path.join(new_root, pathname[1:]) - - elif os.name == 'nt': - (drive, path) = os.path.splitdrive(pathname) - if path[0] == '\\': - path = path[1:] - return os.path.join(new_root, path) - - else: - raise DistutilsPlatformError("nothing known about platform '%s'" % os.name) - - -_environ_checked = 0 -def check_environ (): - """Ensure that 'os.environ' has all the environment variables we - guarantee that users can use in config files, command-line options, - etc. Currently this includes: - HOME - user's home directory (Unix only) - PLAT - description of the current platform, including hardware - and OS (see 'get_platform()') - """ - global _environ_checked - if _environ_checked: - return - - if os.name == 'posix' and 'HOME' not in os.environ: - try: - import pwd - os.environ['HOME'] = pwd.getpwuid(os.getuid())[5] - except (ImportError, KeyError): - # bpo-10496: if the current user identifier doesn't exist in the - # password database, do nothing - pass - - if 'PLAT' not in os.environ: - os.environ['PLAT'] = get_platform() - - _environ_checked = 1 - - -def subst_vars (s, local_vars): - """ - Perform variable substitution on 'string'. - Variables are indicated by format-style braces ("{var}"). - Variable is substituted by the value found in the 'local_vars' - dictionary or in 'os.environ' if it's not in 'local_vars'. 
- 'os.environ' is first checked/augmented to guarantee that it contains - certain values: see 'check_environ()'. Raise ValueError for any - variables not found in either 'local_vars' or 'os.environ'. - """ - check_environ() - lookup = dict(os.environ) - lookup.update((name, str(value)) for name, value in local_vars.items()) - try: - return _subst_compat(s).format_map(lookup) - except KeyError as var: - raise ValueError(f"invalid variable {var}") - -# subst_vars () - - -def _subst_compat(s): - """ - Replace shell/Perl-style variable substitution with - format-style. For compatibility. - """ - def _subst(match): - return f'{{{match.group(1)}}}' - repl = re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s) - if repl != s: - import warnings - warnings.warn( - "shell/Perl-style substitions are deprecated", - DeprecationWarning, - ) - return repl - - -def grok_environment_error (exc, prefix="error: "): - # Function kept for backward compatibility. - # Used to try clever things with EnvironmentErrors, - # but nowadays str(exception) produces good messages. - return prefix + str(exc) - - -# Needed by 'split_quoted()' -_wordchars_re = _squote_re = _dquote_re = None -def _init_regex(): - global _wordchars_re, _squote_re, _dquote_re - _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) - _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") - _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') - -def split_quoted (s): - """Split a string up according to Unix shell-like rules for quotes and - backslashes. In short: words are delimited by spaces, as long as those - spaces are not escaped by a backslash, or inside a quoted string. - Single and double quotes are equivalent, and the quote characters can - be backslash-escaped. The backslash is stripped from any two-character - escape sequence, leaving only the escaped character. The quote - characters are stripped from any quoted string. Returns a list of - words. - """ - - # This is a nice algorithm for splitting up a single string, since it - # doesn't require character-by-character examination. It was a little - # bit of a brain-bender to get it working right, though... - if _wordchars_re is None: _init_regex() - - s = s.strip() - words = [] - pos = 0 - - while s: - m = _wordchars_re.match(s, pos) - end = m.end() - if end == len(s): - words.append(s[:end]) - break - - if s[end] in string.whitespace: # unescaped, unquoted whitespace: now - words.append(s[:end]) # we definitely have a word delimiter - s = s[end:].lstrip() - pos = 0 - - elif s[end] == '\\': # preserve whatever is being escaped; - # will become part of the current word - s = s[:end] + s[end+1:] - pos = end+1 - - else: - if s[end] == "'": # slurp singly-quoted string - m = _squote_re.match(s, end) - elif s[end] == '"': # slurp doubly-quoted string - m = _dquote_re.match(s, end) - else: - raise RuntimeError("this can't happen (bad char '%c')" % s[end]) - - if m is None: - raise ValueError("bad string (mismatched %s quotes?)" % s[end]) - - (beg, end) = m.span() - s = s[:beg] + s[beg+1:end-1] + s[end:] - pos = m.end() - 2 - - if pos >= len(s): - words.append(s) - break - - return words - -# split_quoted () - - -def execute (func, args, msg=None, verbose=0, dry_run=0): - """Perform some action that affects the outside world (eg. by - writing to the filesystem). Such actions are special because they - are disabled by the 'dry_run' flag. 
This method takes care of all - that bureaucracy for you; all you have to do is supply the - function to call and an argument tuple for it (to embody the - "external action" being performed), and an optional message to - print. - """ - if msg is None: - msg = "%s%r" % (func.__name__, args) - if msg[-2:] == ',)': # correct for singleton tuple - msg = msg[0:-2] + ')' - - log.info(msg) - if not dry_run: - func(*args) - - -def strtobool (val): - """Convert a string representation of truth to true (1) or false (0). - - True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values - are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if - 'val' is anything else. - """ - val = val.lower() - if val in ('y', 'yes', 't', 'true', 'on', '1'): - return 1 - elif val in ('n', 'no', 'f', 'false', 'off', '0'): - return 0 - else: - raise ValueError("invalid truth value %r" % (val,)) - - -def byte_compile (py_files, - optimize=0, force=0, - prefix=None, base_dir=None, - verbose=1, dry_run=0, - direct=None): - """Byte-compile a collection of Python source files to .pyc - files in a __pycache__ subdirectory. 'py_files' is a list - of files to compile; any files that don't end in ".py" are silently - skipped. 'optimize' must be one of the following: - 0 - don't optimize - 1 - normal optimization (like "python -O") - 2 - extra optimization (like "python -OO") - If 'force' is true, all files are recompiled regardless of - timestamps. - - The source filename encoded in each bytecode file defaults to the - filenames listed in 'py_files'; you can modify these with 'prefix' and - 'basedir'. 'prefix' is a string that will be stripped off of each - source filename, and 'base_dir' is a directory name that will be - prepended (after 'prefix' is stripped). You can supply either or both - (or neither) of 'prefix' and 'base_dir', as you wish. - - If 'dry_run' is true, doesn't actually do anything that would - affect the filesystem. - - Byte-compilation is either done directly in this interpreter process - with the standard py_compile module, or indirectly by writing a - temporary script and executing it. Normally, you should let - 'byte_compile()' figure out to use direct compilation or not (see - the source for details). The 'direct' flag is used by the script - generated in indirect mode; unless you know what you're doing, leave - it set to None. - """ - - # Late import to fix a bootstrap issue: _posixsubprocess is built by - # setup.py, but setup.py uses distutils. - import subprocess - - # nothing is done if sys.dont_write_bytecode is True - if sys.dont_write_bytecode: - raise DistutilsByteCompileError('byte-compiling is disabled.') - - # First, if the caller didn't force us into direct or indirect mode, - # figure out which mode we should be in. We take a conservative - # approach: choose direct mode *only* if the current interpreter is - # in debug mode and optimize is 0. If we're not in debug mode (-O - # or -OO), we don't know which level of optimization this - # interpreter is running with, so we can't do direct - # byte-compilation and be certain that it's the right thing. Thus, - # always compile indirectly if the current interpreter is in either - # optimize mode, or if either optimization level was requested by - # the caller. - if direct is None: - direct = (__debug__ and optimize == 0) - - # "Indirect" byte-compilation: write a temporary script and then - # run it with the appropriate flags. 
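# [Editor's note -- the throwaway script written below simply re-enters
#  this function in direct mode; for two files and optimize=1 it reads
#  roughly:
#      from distutils.util import byte_compile
#      files = ['a.py', 'b.py']
#      byte_compile(files, optimize=1, force=0, prefix=None,
#                   base_dir=None, verbose=1, dry_run=0, direct=1)
#  and is run via spawn() with the interpreter's own -O/-OO flags.]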
- if not direct: - try: - from tempfile import mkstemp - (script_fd, script_name) = mkstemp(".py") - except ImportError: - from tempfile import mktemp - (script_fd, script_name) = None, mktemp(".py") - log.info("writing byte-compilation script '%s'", script_name) - if not dry_run: - if script_fd is not None: - script = os.fdopen(script_fd, "w") - else: - script = open(script_name, "w") - - with script: - script.write("""\ -from distutils.util import byte_compile -files = [ -""") - - # XXX would be nice to write absolute filenames, just for - # safety's sake (script should be more robust in the face of - # chdir'ing before running it). But this requires abspath'ing - # 'prefix' as well, and that breaks the hack in build_lib's - # 'byte_compile()' method that carefully tacks on a trailing - # slash (os.sep really) to make sure the prefix here is "just - # right". This whole prefix business is rather delicate -- the - # problem is that it's really a directory, but I'm treating it - # as a dumb string, so trailing slashes and so forth matter. - - #py_files = map(os.path.abspath, py_files) - #if prefix: - # prefix = os.path.abspath(prefix) - - script.write(",\n".join(map(repr, py_files)) + "]\n") - script.write(""" -byte_compile(files, optimize=%r, force=%r, - prefix=%r, base_dir=%r, - verbose=%r, dry_run=0, - direct=1) -""" % (optimize, force, prefix, base_dir, verbose)) - - cmd = [sys.executable] - cmd.extend(_optim_args_from_interpreter_flags()) - cmd.append(script_name) - spawn(cmd, dry_run=dry_run) - execute(os.remove, (script_name,), "removing %s" % script_name, - dry_run=dry_run) - - # "Direct" byte-compilation: use the py_compile module to compile - # right here, right now. Note that the script generated in indirect - # mode simply calls 'byte_compile()' in direct mode, a weird sort of - # cross-process recursion. Hey, it works! - else: - from py_compile import compile - - for file in py_files: - if file[-3:] != ".py": - # This lets us be lazy and not filter filenames in - # the "install_lib" command. - continue - - # Terminology from the py_compile module: - # cfile - byte-compiled file - # dfile - purported source filename (same as 'file' by default) - if optimize >= 0: - opt = '' if optimize == 0 else optimize - cfile = importlib.util.cache_from_source( - file, optimization=opt) - else: - cfile = importlib.util.cache_from_source(file) - dfile = file - if prefix: - if file[:len(prefix)] != prefix: - raise ValueError("invalid prefix: filename %r doesn't start with %r" - % (file, prefix)) - dfile = dfile[len(prefix):] - if base_dir: - dfile = os.path.join(base_dir, dfile) - - cfile_base = os.path.basename(cfile) - if direct: - if force or newer(file, cfile): - log.info("byte-compiling %s to %s", file, cfile_base) - if not dry_run: - compile(file, cfile, dfile) - else: - log.debug("skipping byte-compilation of %s to %s", - file, cfile_base) - -# byte_compile () - -def rfc822_escape (header): - """Return a version of the string escaped for inclusion in an - RFC-822 header, by ensuring there are 8 spaces space after each newline. 
- """ - lines = header.split('\n') - sep = '\n' + 8 * ' ' - return sep.join(lines) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/version.py deleted file mode 100755 index 35e181d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/version.py +++ /dev/null @@ -1,363 +0,0 @@ -# -# distutils/version.py -# -# Implements multiple version numbering conventions for the -# Python Module Distribution Utilities. -# -# $Id$ -# - -"""Provides classes to represent module version numbers (one class for -each style of version numbering). There are currently two such classes -implemented: StrictVersion and LooseVersion. - -Every version number class implements the following interface: - * the 'parse' method takes a string and parses it to some internal - representation; if the string is an invalid version number, - 'parse' raises a ValueError exception - * the class constructor takes an optional string argument which, - if supplied, is passed to 'parse' - * __str__ reconstructs the string that was passed to 'parse' (or - an equivalent string -- ie. one that will generate an equivalent - version number instance) - * __repr__ generates Python code to recreate the version number instance - * _cmp compares the current instance with either another instance - of the same class or a string (which will be parsed to an instance - of the same class, thus must follow the same rules) -""" - -import re -import warnings -import contextlib - - -@contextlib.contextmanager -def suppress_known_deprecation(): - with warnings.catch_warnings(record=True) as ctx: - warnings.filterwarnings( - action='default', - category=DeprecationWarning, - message="distutils Version classes are deprecated.", - ) - yield ctx - - -class Version: - """Abstract base class for version numbering classes. Just provides - constructor (__init__) and reproducer (__repr__), because those - seem to be the same for all version numbering classes; and route - rich comparisons to _cmp. - """ - - def __init__ (self, vstring=None): - warnings.warn( - "distutils Version classes are deprecated. " - "Use packaging.version instead.", - DeprecationWarning, - stacklevel=2, - ) - if vstring: - self.parse(vstring) - - def __repr__ (self): - return "%s ('%s')" % (self.__class__.__name__, str(self)) - - def __eq__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c == 0 - - def __lt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c < 0 - - def __le__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c <= 0 - - def __gt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c > 0 - - def __ge__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c >= 0 - - -# Interface for version-number classes -- must be implemented -# by the following classes (the concrete ones -- Version should -# be treated as an abstract class). 
-# __init__ (string) - create and take same action as 'parse' -# (string parameter is optional) -# parse (string) - convert a string representation to whatever -# internal representation is appropriate for -# this style of version numbering -# __str__ (self) - convert back to a string; should be very similar -# (if not identical to) the string supplied to parse -# __repr__ (self) - generate Python code to recreate -# the instance -# _cmp (self, other) - compare two version numbers ('other' may -# be an unparsed version string, or another -# instance of your version class) - - -class StrictVersion (Version): - - """Version numbering for anal retentives and software idealists. - Implements the standard interface for version number classes as - described above. A version number consists of two or three - dot-separated numeric components, with an optional "pre-release" tag - on the end. The pre-release tag consists of the letter 'a' or 'b' - followed by a number. If the numeric components of two version - numbers are equal, then one with a pre-release tag will always - be deemed earlier (lesser) than one without. - - The following are valid version numbers (shown in the order that - would be obtained by sorting according to the supplied cmp function): - - 0.4 0.4.0 (these two are equivalent) - 0.4.1 - 0.5a1 - 0.5b3 - 0.5 - 0.9.6 - 1.0 - 1.0.4a3 - 1.0.4b1 - 1.0.4 - - The following are examples of invalid version numbers: - - 1 - 2.7.2.2 - 1.3.a4 - 1.3pl1 - 1.3c4 - - The rationale for this version numbering system will be explained - in the distutils documentation. - """ - - version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$', - re.VERBOSE | re.ASCII) - - - def parse (self, vstring): - match = self.version_re.match(vstring) - if not match: - raise ValueError("invalid version number '%s'" % vstring) - - (major, minor, patch, prerelease, prerelease_num) = \ - match.group(1, 2, 4, 5, 6) - - if patch: - self.version = tuple(map(int, [major, minor, patch])) - else: - self.version = tuple(map(int, [major, minor])) + (0,) - - if prerelease: - self.prerelease = (prerelease[0], int(prerelease_num)) - else: - self.prerelease = None - - - def __str__ (self): - - if self.version[2] == 0: - vstring = '.'.join(map(str, self.version[0:2])) - else: - vstring = '.'.join(map(str, self.version)) - - if self.prerelease: - vstring = vstring + self.prerelease[0] + str(self.prerelease[1]) - - return vstring - - - def _cmp (self, other): - if isinstance(other, str): - with suppress_known_deprecation(): - other = StrictVersion(other) - elif not isinstance(other, StrictVersion): - return NotImplemented - - if self.version != other.version: - # numeric versions don't match - # prerelease stuff doesn't matter - if self.version < other.version: - return -1 - else: - return 1 - - # have to compare prerelease - # case 1: neither has prerelease; they're equal - # case 2: self has prerelease, other doesn't; other is greater - # case 3: self doesn't have prerelease, other does: self is greater - # case 4: both have prerelease: must compare them! 
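# [Editor's note, illustrative: with equal numeric parts the pre-release
#  tag decides, so StrictVersion('1.0.4a3') < StrictVersion('1.0.4b1')
#  < StrictVersion('1.0.4') -- cases 2-4 below implement that ordering.]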
- - if (not self.prerelease and not other.prerelease): - return 0 - elif (self.prerelease and not other.prerelease): - return -1 - elif (not self.prerelease and other.prerelease): - return 1 - elif (self.prerelease and other.prerelease): - if self.prerelease == other.prerelease: - return 0 - elif self.prerelease < other.prerelease: - return -1 - else: - return 1 - else: - assert False, "never get here" - -# end class StrictVersion - - -# The rules according to Greg Stein: -# 1) a version number has 1 or more numbers separated by a period or by -# sequences of letters. If only periods, then these are compared -# left-to-right to determine an ordering. -# 2) sequences of letters are part of the tuple for comparison and are -# compared lexicographically -# 3) recognize the numeric components may have leading zeroes -# -# The LooseVersion class below implements these rules: a version number -# string is split up into a tuple of integer and string components, and -# comparison is a simple tuple comparison. This means that version -# numbers behave in a predictable and obvious way, but a way that might -# not necessarily be how people *want* version numbers to behave. There -# wouldn't be a problem if people could stick to purely numeric version -# numbers: just split on period and compare the numbers as tuples. -# However, people insist on putting letters into their version numbers; -# the most common purpose seems to be: -# - indicating a "pre-release" version -# ('alpha', 'beta', 'a', 'b', 'pre', 'p') -# - indicating a post-release patch ('p', 'pl', 'patch') -# but of course this can't cover all version number schemes, and there's -# no way to know what a programmer means without asking him. -# -# The problem is what to do with letters (and other non-numeric -# characters) in a version number. The current implementation does the -# obvious and predictable thing: keep them as strings and compare -# lexically within a tuple comparison. This has the desired effect if -# an appended letter sequence implies something "post-release": -# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002". -# -# However, if letters in a version number imply a pre-release version, -# the "obvious" thing isn't correct. Eg. you would expect that -# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison -# implemented here, this just isn't so. -# -# Two possible solutions come to mind. The first is to tie the -# comparison algorithm to a particular set of semantic rules, as has -# been done in the StrictVersion class above. This works great as long -# as everyone can go along with bondage and discipline. Hopefully a -# (large) subset of Python module programmers will agree that the -# particular flavour of bondage and discipline provided by StrictVersion -# provides enough benefit to be worth using, and will submit their -# version numbering scheme to its domination. The free-thinking -# anarchists in the lot will never give in, though, and something needs -# to be done to accommodate them. -# -# Perhaps a "moderately strict" version class could be implemented that -# lets almost anything slide (syntactically), and makes some heuristic -# assumptions about non-digits in version number strings. This could -# sink into special-case-hell, though; if I was as talented and -# idiosyncratic as Larry Wall, I'd go ahead and implement a class that -# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is -# just as happy dealing with things like "2g6" and "1.13++". 
I don't -# think I'm smart enough to do it right though. -# -# In any case, I've coded the test suite for this module (see -# ../test/test_version.py) specifically to fail on things like comparing -# "1.2a2" and "1.2". That's not because the *code* is doing anything -# wrong, it's because the simple, obvious design doesn't match my -# complicated, hairy expectations for real-world version numbers. It -# would be a snap to fix the test suite to say, "Yep, LooseVersion does -# the Right Thing" (ie. the code matches the conception). But I'd rather -# have a conception that matches common notions about version numbers. - -class LooseVersion (Version): - - """Version numbering for anarchists and software realists. - Implements the standard interface for version number classes as - described above. A version number consists of a series of numbers, - separated by either periods or strings of letters. When comparing - version numbers, the numeric components will be compared - numerically, and the alphabetic components lexically. The following - are all valid version numbers, in no particular order: - - 1.5.1 - 1.5.2b2 - 161 - 3.10a - 8.02 - 3.4j - 1996.07.12 - 3.2.pl0 - 3.1.1.6 - 2g6 - 11g - 0.960923 - 2.2beta29 - 1.13++ - 5.5.kw - 2.0b1pl0 - - In fact, there is no such thing as an invalid version number under - this scheme; the rules for comparison are simple and predictable, - but may not always give the results you want (for some definition - of "want"). - """ - - component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE) - - def parse (self, vstring): - # I've given up on thinking I can reconstruct the version string - # from the parsed tuple -- so I just store the string here for - # use by __str__ - self.vstring = vstring - components = [x for x in self.component_re.split(vstring) - if x and x != '.'] - for i, obj in enumerate(components): - try: - components[i] = int(obj) - except ValueError: - pass - - self.version = components - - - def __str__ (self): - return self.vstring - - - def __repr__ (self): - return "LooseVersion ('%s')" % str(self) - - - def _cmp (self, other): - if isinstance(other, str): - other = LooseVersion(other) - elif not isinstance(other, LooseVersion): - return NotImplemented - - if self.version == other.version: - return 0 - if self.version < other.version: - return -1 - if self.version > other.version: - return 1 - - -# end class LooseVersion diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/versionpredicate.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/versionpredicate.py deleted file mode 100755 index 55f25d9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_distutils/versionpredicate.py +++ /dev/null @@ -1,169 +0,0 @@ -"""Module for parsing and testing package version predicate strings. -""" -import re -import distutils.version -import operator - - -re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)", - re.ASCII) -# (package) (rest) - -re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses -re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$") -# (comp) (version) - - -def splitUp(pred): - """Parse a single version comparison. 
-
-    Return (comparison string, StrictVersion)
-    """
-    res = re_splitComparison.match(pred)
-    if not res:
-        raise ValueError("bad package restriction syntax: %r" % pred)
-    comp, verStr = res.groups()
-    with distutils.version.suppress_known_deprecation():
-        other = distutils.version.StrictVersion(verStr)
-    return (comp, other)
-
-compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
-           ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
-
-class VersionPredicate:
-    """Parse and test package version predicates.
-
-    >>> v = VersionPredicate('pyepat.abc (>1.0, <3333.3a1, !=1555.1b3)')
-
-    The `name` attribute provides the full dotted name that is given::
-
-    >>> v.name
-    'pyepat.abc'
-
-    The str() of a `VersionPredicate` provides a normalized
-    human-readable version of the expression::
-
-    >>> print(v)
-    pyepat.abc (> 1.0, < 3333.3a1, != 1555.1b3)
-
-    The `satisfied_by()` method can be used to determine whether a given
-    version number is included in the set described by the version
-    restrictions::
-
-    >>> v.satisfied_by('1.1')
-    True
-    >>> v.satisfied_by('1.4')
-    True
-    >>> v.satisfied_by('1.0')
-    False
-    >>> v.satisfied_by('4444.4')
-    False
-    >>> v.satisfied_by('1555.1b3')
-    False
-
-    `VersionPredicate` is flexible in accepting extra whitespace::
-
-    >>> v = VersionPredicate(' pat( == 0.1 ) ')
-    >>> v.name
-    'pat'
-    >>> v.satisfied_by('0.1')
-    True
-    >>> v.satisfied_by('0.2')
-    False
-
-    If any version numbers passed in do not conform to the
-    restrictions of `StrictVersion`, a `ValueError` is raised::
-
-    >>> v = VersionPredicate('p1.p2.p3.p4(>=1.0, <=1.3a1, !=1.2zb3)')
-    Traceback (most recent call last):
-    ...
-    ValueError: invalid version number '1.2zb3'
-
-    If the module or package name given does not conform to what's
-    allowed as a legal module or package name, `ValueError` is
-    raised::
-
-    >>> v = VersionPredicate('foo-bar')
-    Traceback (most recent call last):
-    ...
-    ValueError: expected parenthesized list: '-bar'
-
-    >>> v = VersionPredicate('foo bar (12.21)')
-    Traceback (most recent call last):
-    ...
-    ValueError: expected parenthesized list: 'bar (12.21)'
-
-    """
-
-    def __init__(self, versionPredicateStr):
-        """Parse a version predicate string.
-        """
-        # Fields:
-        #   name:  package name
-        #   pred:  list of (comparison string, StrictVersion)
-
-        versionPredicateStr = versionPredicateStr.strip()
-        if not versionPredicateStr:
-            raise ValueError("empty package restriction")
-        match = re_validPackage.match(versionPredicateStr)
-        if not match:
-            raise ValueError("bad package name in %r" % versionPredicateStr)
-        self.name, paren = match.groups()
-        paren = paren.strip()
-        if paren:
-            match = re_paren.match(paren)
-            if not match:
-                raise ValueError("expected parenthesized list: %r" % paren)
-            str = match.groups()[0]
-            self.pred = [splitUp(aPred) for aPred in str.split(",")]
-            if not self.pred:
-                raise ValueError("empty parenthesized list in %r"
-                                 % versionPredicateStr)
-        else:
-            self.pred = []
-
-    def __str__(self):
-        if self.pred:
-            seq = [cond + " " + str(ver) for cond, ver in self.pred]
-            return self.name + " (" + ", ".join(seq) + ")"
-        else:
-            return self.name
-
-    def satisfied_by(self, version):
-        """True if version is compatible with all the predicates in self.
-        The parameter version must be acceptable to the StrictVersion
-        constructor.  It may be either a string or StrictVersion.
-        """
-        for cond, ver in self.pred:
-            if not compmap[cond](version, ver):
-                return False
-        return True
-
-
-_provision_rx = None
-
-def split_provision(value):
-    """Return the name and optional version number of a provision.
-
-    The version number, if given, will be returned as a `StrictVersion`
-    instance, otherwise it will be `None`.
-
-    >>> split_provision('mypkg')
-    ('mypkg', None)
-    >>> split_provision(' mypkg( 1.2 ) ')
-    ('mypkg', StrictVersion ('1.2'))
-    """
-    global _provision_rx
-    if _provision_rx is None:
-        _provision_rx = re.compile(
-            r"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$",
-            re.ASCII)
-    value = value.strip()
-    m = _provision_rx.match(value)
-    if not m:
-        raise ValueError("illegal provides specification: %r" % value)
-    ver = m.group(2) or None
-    if ver:
-        with distutils.version.suppress_known_deprecation():
-            ver = distutils.version.StrictVersion(ver)
-    return m.group(1), ver
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_imp.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_imp.py
deleted file mode 100755
index 47efd79..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_imp.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Re-implementation of find_module and get_frozen_object
-from the deprecated imp module.
-"""
-
-import os
-import importlib.util
-import importlib.machinery
-
-from .py34compat import module_from_spec
-
-
-PY_SOURCE = 1
-PY_COMPILED = 2
-C_EXTENSION = 3
-C_BUILTIN = 6
-PY_FROZEN = 7
-
-
-def find_spec(module, paths):
-    finder = (
-        importlib.machinery.PathFinder().find_spec
-        if isinstance(paths, list) else
-        importlib.util.find_spec
-    )
-    return finder(module, paths)
-
-
-def find_module(module, paths=None):
-    """Just like 'imp.find_module()', but with package support"""
-    spec = find_spec(module, paths)
-    if spec is None:
-        raise ImportError("Can't find %s" % module)
-    if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
-        spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
-
-    kind = -1
-    file = None
-    static = isinstance(spec.loader, type)
-    if spec.origin == 'frozen' or static and issubclass(
-            spec.loader, importlib.machinery.FrozenImporter):
-        kind = PY_FROZEN
-        path = None  # imp compatibility
-        suffix = mode = ''  # imp compatibility
-    elif spec.origin == 'built-in' or static and issubclass(
-            spec.loader, importlib.machinery.BuiltinImporter):
-        kind = C_BUILTIN
-        path = None  # imp compatibility
-        suffix = mode = ''  # imp compatibility
-    elif spec.has_location:
-        path = spec.origin
-        suffix = os.path.splitext(path)[1]
-        mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
-
-        if suffix in importlib.machinery.SOURCE_SUFFIXES:
-            kind = PY_SOURCE
-        elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
-            kind = PY_COMPILED
-        elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
-            kind = C_EXTENSION
-
-    if kind in {PY_SOURCE, PY_COMPILED}:
-        file = open(path, mode)
-    else:
-        path = None
-        suffix = mode = ''
-
-    return file, path, (suffix, mode, kind)
-
-
-def get_frozen_object(module, paths=None):
-    spec = find_spec(module, paths)
-    if not spec:
-        raise ImportError("Can't find %s" % module)
-    return spec.loader.get_code(module)
-
-
-def get_module(module, paths, info):
-    spec = find_spec(module, paths)
-    if not spec:
-        raise ImportError("Can't find %s" % module)
-    return module_from_spec(spec)
diff --git
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/__init__.py deleted file mode 100755 index 19a169f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .more import * # noqa -from .recipes import * # noqa - -__version__ = '8.8.0' diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/more.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/more.py deleted file mode 100755 index 0f7d282..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/more.py +++ /dev/null @@ -1,3825 +0,0 @@ -import warnings - -from collections import Counter, defaultdict, deque, abc -from collections.abc import Sequence -from concurrent.futures import ThreadPoolExecutor -from functools import partial, reduce, wraps -from heapq import merge, heapify, heapreplace, heappop -from itertools import ( - chain, - compress, - count, - cycle, - dropwhile, - groupby, - islice, - repeat, - starmap, - takewhile, - tee, - zip_longest, -) -from math import exp, factorial, floor, log -from queue import Empty, Queue -from random import random, randrange, uniform -from operator import itemgetter, mul, sub, gt, lt -from sys import hexversion, maxsize -from time import monotonic - -from .recipes import ( - consume, - flatten, - pairwise, - powerset, - take, - unique_everseen, -) - -__all__ = [ - 'AbortThread', - 'adjacent', - 'always_iterable', - 'always_reversible', - 'bucket', - 'callback_iter', - 'chunked', - 'circular_shifts', - 'collapse', - 'collate', - 'consecutive_groups', - 'consumer', - 'countable', - 'count_cycle', - 'mark_ends', - 'difference', - 'distinct_combinations', - 'distinct_permutations', - 'distribute', - 'divide', - 'exactly_n', - 'filter_except', - 'first', - 'groupby_transform', - 'ilen', - 'interleave_longest', - 'interleave', - 'intersperse', - 'islice_extended', - 'iterate', - 'ichunked', - 'is_sorted', - 'last', - 'locate', - 'lstrip', - 'make_decorator', - 'map_except', - 'map_reduce', - 'nth_or_last', - 'nth_permutation', - 'nth_product', - 'numeric_range', - 'one', - 'only', - 'padded', - 'partitions', - 'set_partitions', - 'peekable', - 'repeat_last', - 'replace', - 'rlocate', - 'rstrip', - 'run_length', - 'sample', - 'seekable', - 'SequenceView', - 'side_effect', - 'sliced', - 'sort_together', - 'split_at', - 'split_after', - 'split_before', - 'split_when', - 'split_into', - 'spy', - 'stagger', - 'strip', - 'substrings', - 'substrings_indexes', - 'time_limited', - 'unique_to_each', - 'unzip', - 'windowed', - 'with_iter', - 'UnequalIterablesError', - 'zip_equal', - 'zip_offset', - 'windowed_complete', - 'all_unique', - 'value_chain', - 'product_index', - 'combination_index', - 'permutation_index', -] - -_marker = object() - - -def chunked(iterable, n, strict=False): - """Break *iterable* into lists of length *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6], 3)) - [[1, 2, 3], [4, 5, 6]] - - By the default, the last yielded list will have fewer than *n* elements - if the length of *iterable* is not divisible by *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) - [[1, 2, 3], [4, 5, 6], [7, 8]] - - To use a fill-in value instead, see the :func:`grouper` recipe. 
- - If the length of *iterable* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - list is yielded. - - """ - iterator = iter(partial(take, n, iter(iterable)), []) - if strict: - - def ret(): - for chunk in iterator: - if len(chunk) != n: - raise ValueError('iterable is not divisible by n.') - yield chunk - - return iter(ret()) - else: - return iterator - - -def first(iterable, default=_marker): - """Return the first item of *iterable*, or *default* if *iterable* is - empty. - - >>> first([0, 1, 2, 3]) - 0 - >>> first([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - - :func:`first` is useful when you have a generator of expensive-to-retrieve - values and want any arbitrary one. It is marginally shorter than - ``next(iter(iterable), default)``. - - """ - try: - return next(iter(iterable)) - except StopIteration as e: - if default is _marker: - raise ValueError( - 'first() was called on an empty iterable, and no ' - 'default value was provided.' - ) from e - return default - - -def last(iterable, default=_marker): - """Return the last item of *iterable*, or *default* if *iterable* is - empty. - - >>> last([0, 1, 2, 3]) - 3 - >>> last([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - try: - if isinstance(iterable, Sequence): - return iterable[-1] - # Work around https://bugs.python.org/issue38525 - elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): - return next(reversed(iterable)) - else: - return deque(iterable, maxlen=1)[-1] - except (IndexError, TypeError, StopIteration): - if default is _marker: - raise ValueError( - 'last() was called on an empty iterable, and no default was ' - 'provided.' - ) - return default - - -def nth_or_last(iterable, n, default=_marker): - """Return the nth or the last item of *iterable*, - or *default* if *iterable* is empty. - - >>> nth_or_last([0, 1, 2, 3], 2) - 2 - >>> nth_or_last([0, 1], 2) - 1 - >>> nth_or_last([], 0, 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - return last(islice(iterable, n + 1), default=default) - - -class peekable: - """Wrap an iterator to allow lookahead and prepending elements. - - Call :meth:`peek` on the result to get the value that will be returned - by :func:`next`. This won't advance the iterator: - - >>> p = peekable(['a', 'b']) - >>> p.peek() - 'a' - >>> next(p) - 'a' - - Pass :meth:`peek` a default value to return that instead of raising - ``StopIteration`` when the iterator is exhausted. - - >>> p = peekable([]) - >>> p.peek('hi') - 'hi' - - peekables also offer a :meth:`prepend` method, which "inserts" items - at the head of the iterable: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> p.peek() - 11 - >>> list(p) - [11, 12, 1, 2, 3] - - peekables can be indexed. Index 0 is the item that will be returned by - :func:`next`, index 1 is the item after that, and so on: - The values up to the given index will be cached. - - >>> p = peekable(['a', 'b', 'c', 'd']) - >>> p[0] - 'a' - >>> p[1] - 'b' - >>> next(p) - 'a' - - Negative indexes are supported, but be aware that they will cache the - remaining items in the source iterator, which may require significant - storage. 
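# A hedged aside on first(), deleted above (assumes the standalone
# more_itertools distribution is importable): it is the plain-next
# spelling its docstring mentions, with a friendlier empty-iterable error.
from more_itertools import first
assert first('abc') == next(iter('abc')) == 'a'
assert first([], 'fallback') == next(iter([]), 'fallback') == 'fallback'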
- - To check whether a peekable is exhausted, check its truth value: - - >>> p = peekable(['a', 'b']) - >>> if p: # peekable has items - ... list(p) - ['a', 'b'] - >>> if not p: # peekable is exhausted - ... list(p) - [] - - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self._cache = deque() - - def __iter__(self): - return self - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - """Return the item that will be next returned from ``next()``. - - Return ``default`` if there are no items left. If ``default`` is not - provided, raise ``StopIteration``. - - """ - if not self._cache: - try: - self._cache.append(next(self._it)) - except StopIteration: - if default is _marker: - raise - return default - return self._cache[0] - - def prepend(self, *items): - """Stack up items to be the next ones returned from ``next()`` or - ``self.peek()``. The items will be returned in - first in, first out order:: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> list(p) - [11, 12, 1, 2, 3] - - It is possible, by prepending items, to "resurrect" a peekable that - previously raised ``StopIteration``. - - >>> p = peekable([]) - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - >>> p.prepend(1) - >>> next(p) - 1 - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - - """ - self._cache.extendleft(reversed(items)) - - def __next__(self): - if self._cache: - return self._cache.popleft() - - return next(self._it) - - def _get_slice(self, index): - # Normalize the slice's arguments - step = 1 if (index.step is None) else index.step - if step > 0: - start = 0 if (index.start is None) else index.start - stop = maxsize if (index.stop is None) else index.stop - elif step < 0: - start = -1 if (index.start is None) else index.start - stop = (-maxsize - 1) if (index.stop is None) else index.stop - else: - raise ValueError('slice step cannot be zero') - - # If either the start or stop index is negative, we'll need to cache - # the rest of the iterable in order to slice from the right side. - if (start < 0) or (stop < 0): - self._cache.extend(self._it) - # Otherwise we'll need to find the rightmost index and cache to that - # point. - else: - n = min(max(start, stop) + 1, maxsize) - cache_len = len(self._cache) - if n >= cache_len: - self._cache.extend(islice(self._it, n - cache_len)) - - return list(self._cache)[index] - - def __getitem__(self, index): - if isinstance(index, slice): - return self._get_slice(index) - - cache_len = len(self._cache) - if index < 0: - self._cache.extend(self._it) - elif index >= cache_len: - self._cache.extend(islice(self._it, index + 1 - cache_len)) - - return self._cache[index] - - -def collate(*iterables, **kwargs): - """Return a sorted merge of the items from each of several already-sorted - *iterables*. - - >>> list(collate('ACDZ', 'AZ', 'JKL')) - ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] - - Works lazily, keeping only the next value from each iterable in memory. Use - :func:`collate` to, for example, perform a n-way mergesort of items that - don't fit in memory. 
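# One way peekable's lookahead gets used in practice (a sketch, not from
# the patch; `run_lengths` is an illustrative name): run-length encoding
# a stream without materializing it.
from more_itertools import peekable

def run_lengths(iterable):
    p = peekable(iterable)
    while p:                      # truth value peeks without advancing
        value, n = next(p), 1
        while p and p.peek() == value:
            next(p)
            n += 1
        yield value, n

assert list(run_lengths(iter('aabccc'))) == [('a', 2), ('b', 1), ('c', 3)]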
- - If a *key* function is specified, the iterables will be sorted according - to its result: - - >>> key = lambda s: int(s) # Sort by numeric value, not by string - >>> list(collate(['1', '10'], ['2', '11'], key=key)) - ['1', '2', '10', '11'] - - - If the *iterables* are sorted in descending order, set *reverse* to - ``True``: - - >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) - [5, 4, 3, 2, 1, 0] - - If the elements of the passed-in iterables are out of order, you might get - unexpected results. - - On Python 3.5+, this function is an alias for :func:`heapq.merge`. - - """ - warnings.warn( - "collate is no longer part of more_itertools, use heapq.merge", - DeprecationWarning, - ) - return merge(*iterables, **kwargs) - - -def consumer(func): - """Decorator that automatically advances a PEP-342-style "reverse iterator" - to its first yield point so you don't have to call ``next()`` on it - manually. - - >>> @consumer - ... def tally(): - ... i = 0 - ... while True: - ... print('Thing number %s is %s.' % (i, (yield))) - ... i += 1 - ... - >>> t = tally() - >>> t.send('red') - Thing number 0 is red. - >>> t.send('fish') - Thing number 1 is fish. - - Without the decorator, you would have to call ``next(t)`` before - ``t.send()`` could be used. - - """ - - @wraps(func) - def wrapper(*args, **kwargs): - gen = func(*args, **kwargs) - next(gen) - return gen - - return wrapper - - -def ilen(iterable): - """Return the number of items in *iterable*. - - >>> ilen(x for x in range(1000000) if x % 3 == 0) - 333334 - - This consumes the iterable, so handle with care. - - """ - # This approach was selected because benchmarks showed it's likely the - # fastest of the known implementations at the time of writing. - # See GitHub tracker: #236, #230. - counter = count() - deque(zip(iterable, counter), maxlen=0) - return next(counter) - - -def iterate(func, start): - """Return ``start``, ``func(start)``, ``func(func(start))``, ... - - >>> from itertools import islice - >>> list(islice(iterate(lambda x: 2*x, 1), 10)) - [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] - - """ - while True: - yield start - start = func(start) - - -def with_iter(context_manager): - """Wrap an iterable in a ``with`` statement, so it closes once exhausted. - - For example, this will close the file when the iterator is exhausted:: - - upper_lines = (line.upper() for line in with_iter(open('foo'))) - - Any context manager which returns an iterable is a candidate for - ``with_iter``. - - """ - with context_manager as iterable: - yield from iterable - - -def one(iterable, too_short=None, too_long=None): - """Return the first item from *iterable*, which is expected to contain only - that item. Raise an exception if *iterable* is empty or has more than one - item. - - :func:`one` is useful for ensuring that an iterable contains only one item. - For example, it can be used to retrieve the result of a database query - that is expected to return a single row. - - If *iterable* is empty, ``ValueError`` will be raised. You may specify a - different exception with the *too_short* keyword: - - >>> it = [] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: too many items in iterable (expected 1)' - >>> too_short = IndexError('too few items') - >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - IndexError: too few items - - Similarly, if *iterable* contains more than one item, ``ValueError`` will - be raised. 
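# The zip/deque trick in ilen() above counts in constant space; this is
# the straightforward loop it replaces (sketch for comparison only).
from more_itertools import ilen

def ilen_naive(iterable):
    n = 0
    for _ in iterable:
        n += 1
    return n

assert ilen_naive(x for x in range(100) if x % 3 == 0) == 34
assert ilen(x for x in range(100) if x % 3 == 0) == 34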
You may specify a different exception with the *too_long* - keyword: - - >>> it = ['too', 'many'] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 'too', - 'many', and perhaps more. - >>> too_long = RuntimeError - >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - RuntimeError - - Note that :func:`one` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check iterable - contents less destructively. - - """ - it = iter(iterable) - - try: - first_value = next(it) - except StopIteration as e: - raise ( - too_short or ValueError('too few items in iterable (expected 1)') - ) from e - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def distinct_permutations(iterable, r=None): - """Yield successive distinct permutations of the elements in *iterable*. - - >>> sorted(distinct_permutations([1, 0, 1])) - [(0, 1, 1), (1, 0, 1), (1, 1, 0)] - - Equivalent to ``set(permutations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - Duplicate permutations arise when there are duplicated elements in the - input iterable. The number of items returned is - `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of - items input, and each `x_i` is the count of a distinct item in the input - sequence. - - If *r* is given, only the *r*-length permutations are yielded. - - >>> sorted(distinct_permutations([1, 0, 1], r=2)) - [(0, 1), (1, 0), (1, 1)] - >>> sorted(distinct_permutations(range(3), r=2)) - [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] - - """ - # Algorithm: https://w.wiki/Qai - def _full(A): - while True: - # Yield the permutation we have - yield tuple(A) - - # Find the largest index i such that A[i] < A[i + 1] - for i in range(size - 2, -1, -1): - if A[i] < A[i + 1]: - break - # If no such index exists, this permutation is the last one - else: - return - - # Find the largest index j greater than j such that A[i] < A[j] - for j in range(size - 1, i, -1): - if A[i] < A[j]: - break - - # Swap the value of A[i] with that of A[j], then reverse the - # sequence from A[i + 1] to form the new permutation - A[i], A[j] = A[j], A[i] - A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1] - - # Algorithm: modified from the above - def _partial(A, r): - # Split A into the first r items and the last r items - head, tail = A[:r], A[r:] - right_head_indexes = range(r - 1, -1, -1) - left_tail_indexes = range(len(tail)) - - while True: - # Yield the permutation we have - yield tuple(head) - - # Starting from the right, find the first index of the head with - # value smaller than the maximum value of the tail - call it i. - pivot = tail[-1] - for i in right_head_indexes: - if head[i] < pivot: - break - pivot = head[i] - else: - return - - # Starting from the left, find the first value of the tail - # with a value greater than head[i] and swap. - for j in left_tail_indexes: - if tail[j] > head[i]: - head[i], tail[j] = tail[j], head[i] - break - # If we didn't find one, start from the right and find the first - # index of the head with a value greater than head[i] and swap. 
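# The classic lexicographic next-permutation step that the _full() helper
# above implements, written standalone (a sketch): find the largest i with
# A[i] < A[i + 1], then the largest j > i with A[j] > A[i], swap, and
# reverse the suffix.  Starting from a sorted multiset, this visits each
# distinct permutation exactly once.
def next_permutation(A):
    for i in range(len(A) - 2, -1, -1):
        if A[i] < A[i + 1]:
            break
    else:
        return False              # already the last permutation
    for j in range(len(A) - 1, i, -1):
        if A[i] < A[j]:
            break
    A[i], A[j] = A[j], A[i]
    A[i + 1:] = A[:i:-1]          # reverse the tail
    return True

A = sorted([1, 0, 1])
seen = [tuple(A)]
while next_permutation(A):
    seen.append(tuple(A))
assert seen == [(0, 1, 1), (1, 0, 1), (1, 1, 0)]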
- else: - for j in right_head_indexes: - if head[j] > head[i]: - head[i], head[j] = head[j], head[i] - break - - # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] - tail += head[: i - r : -1] # head[i + 1:][::-1] - i += 1 - head[i:], tail[:] = tail[: r - i], tail[r - i :] - - items = sorted(iterable) - - size = len(items) - if r is None: - r = size - - if 0 < r <= size: - return _full(items) if (r == size) else _partial(items, r) - - return iter(() if r else ((),)) - - -def intersperse(e, iterable, n=1): - """Intersperse filler element *e* among the items in *iterable*, leaving - *n* items between each filler element. - - >>> list(intersperse('!', [1, 2, 3, 4, 5])) - [1, '!', 2, '!', 3, '!', 4, '!', 5] - - >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) - [1, 2, None, 3, 4, None, 5] - - """ - if n == 0: - raise ValueError('n must be > 0') - elif n == 1: - # interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2... - # islice(..., 1, None) -> x_0, e, e, x_1, e, x_2... - return islice(interleave(repeat(e), iterable), 1, None) - else: - # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... - # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... - # flatten(...) -> x_0, x_1, e, x_2, x_3... - filler = repeat([e]) - chunks = chunked(iterable, n) - return flatten(islice(interleave(filler, chunks), 1, None)) - - -def unique_to_each(*iterables): - """Return the elements from each of the input iterables that aren't in the - other input iterables. - - For example, suppose you have a set of packages, each with a set of - dependencies:: - - {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} - - If you remove one package, which dependencies can also be removed? - - If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not - associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for - ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: - - >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) - [['A'], ['C'], ['D']] - - If there are duplicates in one input iterable that aren't in the others - they will be duplicated in the output. Input order is preserved:: - - >>> unique_to_each("mississippi", "missouri") - [['p', 'p'], ['o', 'u', 'r']] - - It is assumed that the elements of each iterable are hashable. - - """ - pool = [list(it) for it in iterables] - counts = Counter(chain.from_iterable(map(set, pool))) - uniques = {element for element in counts if counts[element] == 1} - return [list(filter(uniques.__contains__, it)) for it in pool] - - -def windowed(seq, n, fillvalue=None, step=1): - """Return a sliding window of width *n* over the given iterable. 
- - >>> all_windows = windowed([1, 2, 3, 4, 5], 3) - >>> list(all_windows) - [(1, 2, 3), (2, 3, 4), (3, 4, 5)] - - When the window is larger than the iterable, *fillvalue* is used in place - of missing values: - - >>> list(windowed([1, 2, 3], 4)) - [(1, 2, 3, None)] - - Each window will advance in increments of *step*: - - >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) - [(1, 2, 3), (3, 4, 5), (5, 6, '!')] - - To slide into the iterable's items, use :func:`chain` to add filler items - to the left: - - >>> iterable = [1, 2, 3, 4] - >>> n = 3 - >>> padding = [None] * (n - 1) - >>> list(windowed(chain(padding, iterable), 3)) - [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)] - """ - if n < 0: - raise ValueError('n must be >= 0') - if n == 0: - yield tuple() - return - if step < 1: - raise ValueError('step must be >= 1') - - window = deque(maxlen=n) - i = n - for _ in map(window.append, seq): - i -= 1 - if not i: - i = step - yield tuple(window) - - size = len(window) - if size < n: - yield tuple(chain(window, repeat(fillvalue, n - size))) - elif 0 < i < min(step, n): - window += (fillvalue,) * i - yield tuple(window) - - -def substrings(iterable): - """Yield all of the substrings of *iterable*. - - >>> [''.join(s) for s in substrings('more')] - ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] - - Note that non-string iterables can also be subdivided. - - >>> list(substrings([0, 1, 2])) - [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] - - """ - # The length-1 substrings - seq = [] - for item in iter(iterable): - seq.append(item) - yield (item,) - seq = tuple(seq) - item_count = len(seq) - - # And the rest - for n in range(2, item_count + 1): - for i in range(item_count - n + 1): - yield seq[i : i + n] - - -def substrings_indexes(seq, reverse=False): - """Yield all substrings and their positions in *seq* - - The items yielded will be a tuple of the form ``(substr, i, j)``, where - ``substr == seq[i:j]``. - - This function only works for iterables that support slicing, such as - ``str`` objects. - - >>> for item in substrings_indexes('more'): - ... print(item) - ('m', 0, 1) - ('o', 1, 2) - ('r', 2, 3) - ('e', 3, 4) - ('mo', 0, 2) - ('or', 1, 3) - ('re', 2, 4) - ('mor', 0, 3) - ('ore', 1, 4) - ('more', 0, 4) - - Set *reverse* to ``True`` to yield the same items in the opposite order. - - - """ - r = range(1, len(seq) + 1) - if reverse: - r = reversed(r) - return ( - (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) - ) - - -class bucket: - """Wrap *iterable* and return an object that buckets it iterable into - child iterables based on a *key* function. - - >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] - >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character - >>> sorted(list(s)) # Get the keys - ['a', 'b', 'c'] - >>> a_iterable = s['a'] - >>> next(a_iterable) - 'a1' - >>> next(a_iterable) - 'a2' - >>> list(s['b']) - ['b1', 'b2', 'b3'] - - The original iterable will be advanced and its items will be cached until - they are used by the child iterables. This may require significant storage. - - By default, attempting to select a bucket to which no items belong will - exhaust the iterable and cache all values. - If you specify a *validator* function, selected buckets will instead be - checked against it. 
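# Typical windowed() use (sketch, assuming more_itertools is installed
# separately): a width-3 moving average over a list.
from more_itertools import windowed
xs = [1, 2, 3, 4, 5]
assert [sum(w) / 3 for w in windowed(xs, 3)] == [2.0, 3.0, 4.0]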
- - >>> from itertools import count - >>> it = count(1, 2) # Infinite sequence of odd numbers - >>> key = lambda x: x % 10 # Bucket by last digit - >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only - >>> s = bucket(it, key=key, validator=validator) - >>> 2 in s - False - >>> list(s[2]) - [] - - """ - - def __init__(self, iterable, key, validator=None): - self._it = iter(iterable) - self._key = key - self._cache = defaultdict(deque) - self._validator = validator or (lambda x: True) - - def __contains__(self, value): - if not self._validator(value): - return False - - try: - item = next(self[value]) - except StopIteration: - return False - else: - self._cache[value].appendleft(item) - - return True - - def _get_values(self, value): - """ - Helper to yield items from the parent iterator that match *value*. - Items that don't match are stored in the local cache as they - are encountered. - """ - while True: - # If we've cached some items that match the target value, emit - # the first one and evict it from the cache. - if self._cache[value]: - yield self._cache[value].popleft() - # Otherwise we need to advance the parent iterator to search for - # a matching item, caching the rest. - else: - while True: - try: - item = next(self._it) - except StopIteration: - return - item_value = self._key(item) - if item_value == value: - yield item - break - elif self._validator(item_value): - self._cache[item_value].append(item) - - def __iter__(self): - for item in self._it: - item_value = self._key(item) - if self._validator(item_value): - self._cache[item_value].append(item) - - yield from self._cache.keys() - - def __getitem__(self, value): - if not self._validator(value): - return iter(()) - - return self._get_values(value) - - -def spy(iterable, n=1): - """Return a 2-tuple with a list containing the first *n* elements of - *iterable*, and an iterator with the same items as *iterable*. - This allows you to "look ahead" at the items in the iterable without - advancing it. - - There is one item in the list by default: - - >>> iterable = 'abcdefg' - >>> head, iterable = spy(iterable) - >>> head - ['a'] - >>> list(iterable) - ['a', 'b', 'c', 'd', 'e', 'f', 'g'] - - You may use unpacking to retrieve items instead of lists: - - >>> (head,), iterable = spy('abcdefg') - >>> head - 'a' - >>> (first, second), iterable = spy('abcdefg', 2) - >>> first - 'a' - >>> second - 'b' - - The number of items requested can be larger than the number of items in - the iterable: - - >>> iterable = [1, 2, 3, 4, 5] - >>> head, iterable = spy(iterable, 10) - >>> head - [1, 2, 3, 4, 5] - >>> list(iterable) - [1, 2, 3, 4, 5] - - """ - it = iter(iterable) - head = take(n, it) - - return head.copy(), chain(head, it) - - -def interleave(*iterables): - """Return a new iterable yielding from each iterable in turn, - until the shortest is exhausted. - - >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7] - - For a version that doesn't terminate after the shortest iterable is - exhausted, see :func:`interleave_longest`. - - """ - return chain.from_iterable(zip(*iterables)) - - -def interleave_longest(*iterables): - """Return a new iterable yielding from each iterable in turn, - skipping any that are exhausted. - - >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7, 3, 8] - - This function produces the same output as :func:`roundrobin`, but may - perform better for some inputs (in particular when the number of iterables - is large). 
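# bucket() children stay lazy (a sketch of the caching behavior described
# above): items for other keys are cached as the parent iterator advances,
# so interleaved consumption works.
from more_itertools import bucket
s = bucket(['a1', 'b1', 'a2'], key=lambda x: x[0])
a_items = s['a']
assert next(a_items) == 'a1'
assert list(s['b']) == ['b1']     # 'a2' is cached on the way past
assert list(a_items) == ['a2']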
- - """ - i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) - return (x for x in i if x is not _marker) - - -def collapse(iterable, base_type=None, levels=None): - """Flatten an iterable with multiple levels of nesting (e.g., a list of - lists of tuples) into non-iterable types. - - >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] - >>> list(collapse(iterable)) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and - will not be collapsed. - - To avoid collapsing other types, specify *base_type*: - - >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']] - >>> list(collapse(iterable, base_type=tuple)) - ['ab', ('cd', 'ef'), 'gh', 'ij'] - - Specify *levels* to stop flattening after a certain level: - - >>> iterable = [('a', ['b']), ('c', ['d'])] - >>> list(collapse(iterable)) # Fully flattened - ['a', 'b', 'c', 'd'] - >>> list(collapse(iterable, levels=1)) # Only one level flattened - ['a', ['b'], 'c', ['d']] - - """ - - def walk(node, level): - if ( - ((levels is not None) and (level > levels)) - or isinstance(node, (str, bytes)) - or ((base_type is not None) and isinstance(node, base_type)) - ): - yield node - return - - try: - tree = iter(node) - except TypeError: - yield node - return - else: - for child in tree: - yield from walk(child, level + 1) - - yield from walk(iterable, 0) - - -def side_effect(func, iterable, chunk_size=None, before=None, after=None): - """Invoke *func* on each item in *iterable* (or on each *chunk_size* group - of items) before yielding the item. - - `func` must be a function that takes a single argument. Its return value - will be discarded. - - *before* and *after* are optional functions that take no arguments. They - will be executed before iteration starts and after it ends, respectively. - - `side_effect` can be used for logging, updating progress bars, or anything - that is not functionally "pure." - - Emitting a status message: - - >>> from more_itertools import consume - >>> func = lambda item: print('Received {}'.format(item)) - >>> consume(side_effect(func, range(2))) - Received 0 - Received 1 - - Operating on chunks of items: - - >>> pair_sums = [] - >>> func = lambda chunk: pair_sums.append(sum(chunk)) - >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2)) - [0, 1, 2, 3, 4, 5] - >>> list(pair_sums) - [1, 5, 9] - - Writing to a file-like object: - - >>> from io import StringIO - >>> from more_itertools import consume - >>> f = StringIO() - >>> func = lambda x: print(x, file=f) - >>> before = lambda: print(u'HEADER', file=f) - >>> after = f.close - >>> it = [u'a', u'b', u'c'] - >>> consume(side_effect(func, it, before=before, after=after)) - >>> f.closed - True - - """ - try: - if before is not None: - before() - - if chunk_size is None: - for item in iterable: - func(item) - yield item - else: - for chunk in chunked(iterable, chunk_size): - func(chunk) - yield from chunk - finally: - if after is not None: - after() - - -def sliced(seq, n, strict=False): - """Yield slices of length *n* from the sequence *seq*. - - >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) - [(1, 2, 3), (4, 5, 6)] - - By the default, the last yielded slice will have fewer than *n* elements - if the length of *seq* is not divisible by *n*: - - >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) - [(1, 2, 3), (4, 5, 6), (7, 8)] - - If the length of *seq* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - slice is yielded. - - This function will only work for iterables that support slicing. 
- For non-sliceable iterables, see :func:`chunked`. - - """ - iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) - if strict: - - def ret(): - for _slice in iterator: - if len(_slice) != n: - raise ValueError("seq is not divisible by n.") - yield _slice - - return iter(ret()) - else: - return iterator - - -def split_at(iterable, pred, maxsplit=-1, keep_separator=False): - """Yield lists of items from *iterable*, where each list is delimited by - an item where callable *pred* returns ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b')) - [['a'], ['c', 'd', 'c'], ['a']] - - >>> list(split_at(range(10), lambda n: n % 2 == 1)) - [[0], [2], [4], [6], [8], []] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) - [[0], [2], [4, 5, 6, 7, 8, 9]] - - By default, the delimiting items are not included in the output. - The include them, set *keep_separator* to ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) - [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item): - yield buf - if keep_separator: - yield [item] - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - else: - buf.append(item) - yield buf - - -def split_before(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends just before - an item for which callable *pred* returns ``True``: - - >>> list(split_before('OneTwo', lambda s: s.isupper())) - [['O', 'n', 'e'], ['T', 'w', 'o']] - - >>> list(split_before(range(10), lambda n: n % 3 == 0)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield [item] + list(it) - return - buf = [] - maxsplit -= 1 - buf.append(item) - if buf: - yield buf - - -def split_after(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends with an - item where callable *pred* returns ``True``: - - >>> list(split_after('one1two2', lambda s: s.isdigit())) - [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] - - >>> list(split_after(range(10), lambda n: n % 3 == 0)) - [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) - [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - buf.append(item) - if pred(item) and buf: - yield buf - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - if buf: - yield buf - - -def split_when(iterable, pred, maxsplit=-1): - """Split *iterable* into pieces based on the output of *pred*. - *pred* should be a function that takes successive pairs of items and - returns ``True`` if the iterable should be split in between them. 
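# split_at() generalizes str.split from separator substrings to arbitrary
# predicates (a sketch of the analogy):
from more_itertools import split_at
assert 'a1b1c'.split('1') == ['a', 'b', 'c']
assert list(split_at('a1b1c', lambda c: c == '1')) == [['a'], ['b'], ['c']]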
- - For example, to find runs of increasing numbers, split the iterable when - element ``i`` is larger than element ``i + 1``: - - >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y)) - [[1, 2, 3, 3], [2, 5], [2, 4], [2]] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], - ... lambda x, y: x > y, maxsplit=2)) - [[1, 2, 3, 3], [2, 5], [2, 4, 2]] - - """ - if maxsplit == 0: - yield list(iterable) - return - - it = iter(iterable) - try: - cur_item = next(it) - except StopIteration: - return - - buf = [cur_item] - for next_item in it: - if pred(cur_item, next_item): - yield buf - if maxsplit == 1: - yield [next_item] + list(it) - return - buf = [] - maxsplit -= 1 - - buf.append(next_item) - cur_item = next_item - - yield buf - - -def split_into(iterable, sizes): - """Yield a list of sequential items from *iterable* of length 'n' for each - integer 'n' in *sizes*. - - >>> list(split_into([1,2,3,4,5,6], [1,2,3])) - [[1], [2, 3], [4, 5, 6]] - - If the sum of *sizes* is smaller than the length of *iterable*, then the - remaining items of *iterable* will not be returned. - - >>> list(split_into([1,2,3,4,5,6], [2,3])) - [[1, 2], [3, 4, 5]] - - If the sum of *sizes* is larger than the length of *iterable*, fewer items - will be returned in the iteration that overruns *iterable* and further - lists will be empty: - - >>> list(split_into([1,2,3,4], [1,2,3,4])) - [[1], [2, 3], [4], []] - - When a ``None`` object is encountered in *sizes*, the returned list will - contain items up to the end of *iterable* the same way that itertools.slice - does: - - >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None])) - [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]] - - :func:`split_into` can be useful for grouping a series of items where the - sizes of the groups are not uniform. An example would be where in a row - from a table, multiple columns represent elements of the same feature - (e.g. a point represented by x,y,z) but, the format is not the same for - all columns. - """ - # convert the iterable argument into an iterator so its contents can - # be consumed by islice in case it is a generator - it = iter(iterable) - - for size in sizes: - if size is None: - yield list(it) - return - else: - yield list(islice(it, size)) - - -def padded(iterable, fillvalue=None, n=None, next_multiple=False): - """Yield the elements from *iterable*, followed by *fillvalue*, such that - at least *n* items are emitted. - - >>> list(padded([1, 2, 3], '?', 5)) - [1, 2, 3, '?', '?'] - - If *next_multiple* is ``True``, *fillvalue* will be emitted until the - number of items emitted is a multiple of *n*:: - - >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True)) - [1, 2, 3, 4, None, None] - - If *n* is ``None``, *fillvalue* will be emitted indefinitely. - - """ - it = iter(iterable) - if n is None: - yield from chain(it, repeat(fillvalue)) - elif n < 1: - raise ValueError('n must be at least 1') - else: - item_count = 0 - for item in it: - yield item - item_count += 1 - - remaining = (n - item_count) % n if next_multiple else n - item_count - for _ in range(remaining): - yield fillvalue - - -def repeat_last(iterable, default=None): - """After the *iterable* is exhausted, keep yielding its last element. 
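# split_into() fits fixed-width record layouts (sketch; the field widths
# here are made up for illustration):
from more_itertools import split_into
record = '20231012WILLIAM  42'
date, name, age = (''.join(part) for part in split_into(record, [8, 9, 2]))
assert (date, name.strip(), age) == ('20231012', 'WILLIAM', '42')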
- - >>> list(islice(repeat_last(range(3)), 5)) - [0, 1, 2, 2, 2] - - If the iterable is empty, yield *default* forever:: - - >>> list(islice(repeat_last(range(0), 42), 5)) - [42, 42, 42, 42, 42] - - """ - item = _marker - for item in iterable: - yield item - final = default if item is _marker else item - yield from repeat(final) - - -def distribute(n, iterable): - """Distribute the items from *iterable* among *n* smaller iterables. - - >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 3, 5] - >>> list(group_2) - [2, 4, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 4, 7], [2, 5], [3, 6]] - - If the length of *iterable* is smaller than *n*, then the last returned - iterables will be empty: - - >>> children = distribute(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function uses :func:`itertools.tee` and may require significant - storage. If you need the order items in the smaller iterables to match the - original iterable, see :func:`divide`. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - children = tee(iterable, n) - return [islice(it, index, None, n) for index, it in enumerate(children)] - - -def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): - """Yield tuples whose elements are offset from *iterable*. - The amount by which the `i`-th item in each tuple is offset is given by - the `i`-th item in *offsets*. - - >>> list(stagger([0, 1, 2, 3])) - [(None, 0, 1), (0, 1, 2), (1, 2, 3)] - >>> list(stagger(range(8), offsets=(0, 2, 4))) - [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)] - - By default, the sequence will end when the final element of a tuple is the - last item in the iterable. To continue until the first element of a tuple - is the last item in the iterable, set *longest* to ``True``:: - - >>> list(stagger([0, 1, 2, 3], longest=True)) - [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - children = tee(iterable, len(offsets)) - - return zip_offset( - *children, offsets=offsets, longest=longest, fillvalue=fillvalue - ) - - -class UnequalIterablesError(ValueError): - def __init__(self, details=None): - msg = 'Iterables have different lengths' - if details is not None: - msg += (': index 0 has length {}; index {} has length {}').format( - *details - ) - - super().__init__(msg) - - -def _zip_equal_generator(iterables): - for combo in zip_longest(*iterables, fillvalue=_marker): - for val in combo: - if val is _marker: - raise UnequalIterablesError() - yield combo - - -def zip_equal(*iterables): - """``zip`` the input *iterables* together, but raise - ``UnequalIterablesError`` if they aren't all the same length. - - >>> it_1 = range(3) - >>> it_2 = iter('abc') - >>> list(zip_equal(it_1, it_2)) - [(0, 'a'), (1, 'b'), (2, 'c')] - - >>> it_1 = range(3) - >>> it_2 = iter('abcd') - >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - more_itertools.more.UnequalIterablesError: Iterables have different - lengths - - """ - if hexversion >= 0x30A00A6: - warnings.warn( - ( - 'zip_equal will be removed in a future version of ' - 'more-itertools. 
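# distribute() deals items round-robin, while divide() (further down)
# keeps runs contiguous -- a quick contrast (sketch):
from more_itertools import distribute, divide
assert [list(g) for g in distribute(2, [1, 2, 3, 4])] == [[1, 3], [2, 4]]
assert [list(g) for g in divide(2, [1, 2, 3, 4])] == [[1, 2], [3, 4]]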
Use the builtin zip function with ' - 'strict=True instead.' - ), - DeprecationWarning, - ) - # Check whether the iterables are all the same size. - try: - first_size = len(iterables[0]) - for i, it in enumerate(iterables[1:], 1): - size = len(it) - if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. - raise UnequalIterablesError(details=(first_size, i, size)) - # If any one of the iterables didn't have a length, start reading - # them until one runs out. - except TypeError: - return _zip_equal_generator(iterables) - - -def zip_offset(*iterables, offsets, longest=False, fillvalue=None): - """``zip`` the input *iterables* together, but offset the `i`-th iterable - by the `i`-th item in *offsets*. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] - - This can be used as a lightweight alternative to SciPy or pandas to analyze - data sets in which some series have a lead or lag relationship. - - By default, the sequence will end when the shortest iterable is exhausted. - To continue until the longest iterable is exhausted, set *longest* to - ``True``. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - if len(iterables) != len(offsets): - raise ValueError("Number of iterables and offsets didn't match") - - staggered = [] - for it, n in zip(iterables, offsets): - if n < 0: - staggered.append(chain(repeat(fillvalue, -n), it)) - elif n > 0: - staggered.append(islice(it, n, None)) - else: - staggered.append(it) - - if longest: - return zip_longest(*staggered, fillvalue=fillvalue) - - return zip(*staggered) - - -def sort_together(iterables, key_list=(0,), key=None, reverse=False): - """Return the input iterables sorted together, with *key_list* as the - priority for sorting. All iterables are trimmed to the length of the - shortest one. - - This can be used like the sorting function in a spreadsheet. If each - iterable represents a column of data, the key list determines which - columns are used for sorting. - - By default, all iterables are sorted using the ``0``-th iterable:: - - >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] - >>> sort_together(iterables) - [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] - - Set a different key list to sort according to another iterable. - Specifying multiple keys dictates how ties are broken:: - - >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] - >>> sort_together(iterables, key_list=(1, 2)) - [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] - - To sort by a function of the elements of the iterable, pass a *key* - function. Its arguments are the elements of the iterables corresponding to - the key list:: - - >>> names = ('a', 'b', 'c') - >>> lengths = (1, 2, 3) - >>> widths = (5, 2, 1) - >>> def area(length, width): - ... return length * width - >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) - [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] - - Set *reverse* to ``True`` to sort in descending order. 
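# zip_offset() pairs a series with a lagged copy of itself, the lead/lag
# use its docstring mentions (sketch): first differences without pandas.
from more_itertools import zip_offset
xs = [3, 1, 4, 1, 5]
assert [b - a for a, b in zip_offset(xs, xs, offsets=(0, 1))] == [-2, 3, -3, 4]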
- - >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) - [(3, 2, 1), ('a', 'b', 'c')] - - """ - if key is None: - # if there is no key function, the key argument to sorted is an - # itemgetter - key_argument = itemgetter(*key_list) - else: - # if there is a key function, call it with the items at the offsets - # specified by the key function as arguments - key_list = list(key_list) - if len(key_list) == 1: - # if key_list contains a single item, pass the item at that offset - # as the only argument to the key function - key_offset = key_list[0] - key_argument = lambda zipped_items: key(zipped_items[key_offset]) - else: - # if key_list contains multiple items, use itemgetter to return a - # tuple of items, which we pass as *args to the key function - get_key_items = itemgetter(*key_list) - key_argument = lambda zipped_items: key( - *get_key_items(zipped_items) - ) - - return list( - zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) - ) - - -def unzip(iterable): - """The inverse of :func:`zip`, this function disaggregates the elements - of the zipped *iterable*. - - The ``i``-th iterable contains the ``i``-th element from each element - of the zipped iterable. The first element is used to to determine the - length of the remaining elements. - - >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> letters, numbers = unzip(iterable) - >>> list(letters) - ['a', 'b', 'c', 'd'] - >>> list(numbers) - [1, 2, 3, 4] - - This is similar to using ``zip(*iterable)``, but it avoids reading - *iterable* into memory. Note, however, that this function uses - :func:`itertools.tee` and thus may require significant storage. - - """ - head, iterable = spy(iter(iterable)) - if not head: - # empty iterable, e.g. zip([], [], []) - return () - # spy returns a one-length iterable as head - head = head[0] - iterables = tee(iterable, len(head)) - - def itemgetter(i): - def getter(obj): - try: - return obj[i] - except IndexError: - # basically if we have an iterable like - # iter([(1, 2, 3), (4, 5), (6,)]) - # the second unzipped iterable would fail at the third tuple - # since it would try to access tup[1] - # same with the third unzipped iterable and the second tuple - # to support these "improperly zipped" iterables, - # we create a custom itemgetter - # which just stops the unzipped iterables - # at first length mismatch - raise StopIteration - - return getter - - return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) - - -def divide(n, iterable): - """Divide the elements from *iterable* into *n* parts, maintaining - order. - - >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 2, 3] - >>> list(group_2) - [4, 5, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 2, 3], [4, 5], [6, 7]] - - If the length of the iterable is smaller than n, then the last returned - iterables will be empty: - - >>> children = divide(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function will exhaust the iterable before returning and may require - significant storage. If order is not important, see :func:`distribute`, - which does not first pull the iterable into memory. 
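# unzip() yields lazy component streams (sketch); nothing is read beyond
# what each stream actually consumes.
from more_itertools import unzip
letters, numbers = unzip(iter([('a', 1), ('b', 2), ('c', 3)]))
assert next(letters) == 'a'
assert next(numbers) == 1
assert list(letters) == ['b', 'c']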
- - """ - if n < 1: - raise ValueError('n must be at least 1') - - try: - iterable[:0] - except TypeError: - seq = tuple(iterable) - else: - seq = iterable - - q, r = divmod(len(seq), n) - - ret = [] - stop = 0 - for i in range(1, n + 1): - start = stop - stop += q + 1 if i <= r else q - ret.append(iter(seq[start:stop])) - - return ret - - -def always_iterable(obj, base_type=(str, bytes)): - """If *obj* is iterable, return an iterator over its items:: - - >>> obj = (1, 2, 3) - >>> list(always_iterable(obj)) - [1, 2, 3] - - If *obj* is not iterable, return a one-item iterable containing *obj*:: - - >>> obj = 1 - >>> list(always_iterable(obj)) - [1] - - If *obj* is ``None``, return an empty iterable: - - >>> obj = None - >>> list(always_iterable(None)) - [] - - By default, binary and text strings are not considered iterable:: - - >>> obj = 'foo' - >>> list(always_iterable(obj)) - ['foo'] - - If *base_type* is set, objects for which ``isinstance(obj, base_type)`` - returns ``True`` won't be considered iterable. - - >>> obj = {'a': 1} - >>> list(always_iterable(obj)) # Iterate over the dict's keys - ['a'] - >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit - [{'a': 1}] - - Set *base_type* to ``None`` to avoid any special handling and treat objects - Python considers iterable as iterable: - - >>> obj = 'foo' - >>> list(always_iterable(obj, base_type=None)) - ['f', 'o', 'o'] - """ - if obj is None: - return iter(()) - - if (base_type is not None) and isinstance(obj, base_type): - return iter((obj,)) - - try: - return iter(obj) - except TypeError: - return iter((obj,)) - - -def adjacent(predicate, iterable, distance=1): - """Return an iterable over `(bool, item)` tuples where the `item` is - drawn from *iterable* and the `bool` indicates whether - that item satisfies the *predicate* or is adjacent to an item that does. - - For example, to find whether items are adjacent to a ``3``:: - - >>> list(adjacent(lambda x: x == 3, range(6))) - [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] - - Set *distance* to change what counts as adjacent. For example, to find - whether items are two places away from a ``3``: - - >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) - [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] - - This is useful for contextualizing the results of a search function. - For example, a code comparison tool might want to identify lines that - have changed, but also surrounding lines to give the viewer of the diff - context. - - The predicate function will only be called once for each item in the - iterable. - - See also :func:`groupby_transform`, which can be used with this function - to group ranges of items with the same `bool` value. - - """ - # Allow distance=0 mainly for testing that it reproduces results with map() - if distance < 0: - raise ValueError('distance must be at least 0') - - i1, i2 = tee(iterable) - padding = [False] * distance - selected = chain(padding, map(predicate, i1), padding) - adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) - return zip(adjacent_to_selected, i2) - - -def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): - """An extension of :func:`itertools.groupby` that can apply transformations - to the grouped data. 
- - * *keyfunc* is a function computing a key value for each item in *iterable* - * *valuefunc* is a function that transforms the individual items from - *iterable* after grouping - * *reducefunc* is a function that transforms each group of items - - >>> iterable = 'aAAbBBcCC' - >>> keyfunc = lambda k: k.upper() - >>> valuefunc = lambda v: v.lower() - >>> reducefunc = lambda g: ''.join(g) - >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) - [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] - - Each optional argument defaults to an identity function if not specified. - - :func:`groupby_transform` is useful when grouping elements of an iterable - using a separate iterable as the key. To do this, :func:`zip` the iterables - and pass a *keyfunc* that extracts the first element and a *valuefunc* - that extracts the second element:: - - >>> from operator import itemgetter - >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] - >>> values = 'abcdefghi' - >>> iterable = zip(keys, values) - >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) - >>> [(k, ''.join(g)) for k, g in grouper] - [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] - - Note that the order of items in the iterable is significant. - Only adjacent items are grouped together, so if you don't want any - duplicate groups, you should sort the iterable by the key function. - - """ - ret = groupby(iterable, keyfunc) - if valuefunc: - ret = ((k, map(valuefunc, g)) for k, g in ret) - if reducefunc: - ret = ((k, reducefunc(g)) for k, g in ret) - - return ret - - -class numeric_range(abc.Sequence, abc.Hashable): - """An extension of the built-in ``range()`` function whose arguments can - be any orderable numeric type. - - With only *stop* specified, *start* defaults to ``0`` and *step* - defaults to ``1``. The output items will match the type of *stop*: - - >>> list(numeric_range(3.5)) - [0.0, 1.0, 2.0, 3.0] - - With only *start* and *stop* specified, *step* defaults to ``1``. The - output items will match the type of *start*: - - >>> from decimal import Decimal - >>> start = Decimal('2.1') - >>> stop = Decimal('5.1') - >>> list(numeric_range(start, stop)) - [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] - - With *start*, *stop*, and *step* specified the output items will match - the type of ``start + step``: - - >>> from fractions import Fraction - >>> start = Fraction(1, 2) # Start at 1/2 - >>> stop = Fraction(5, 2) # End at 5/2 - >>> step = Fraction(1, 2) # Count by 1/2 - >>> list(numeric_range(start, stop, step)) - [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] - - If *step* is zero, ``ValueError`` is raised. Negative steps are supported: - - >>> list(numeric_range(3, -1, -1.0)) - [3.0, 2.0, 1.0, 0.0] - - Be aware of the limitations of floating point numbers; the representation - of the yielded numbers may be surprising. 
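-
-    For instance, with a float *step* (these particular values are exactly
-    representable, so the output here is clean):
-
-    >>> list(numeric_range(0, 1, 0.25))
-    [0.0, 0.25, 0.5, 0.75]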
- - ``datetime.datetime`` objects can be used for *start* and *stop*, if *step* - is a ``datetime.timedelta`` object: - - >>> import datetime - >>> start = datetime.datetime(2019, 1, 1) - >>> stop = datetime.datetime(2019, 1, 3) - >>> step = datetime.timedelta(days=1) - >>> items = iter(numeric_range(start, stop, step)) - >>> next(items) - datetime.datetime(2019, 1, 1, 0, 0) - >>> next(items) - datetime.datetime(2019, 1, 2, 0, 0) - - """ - - _EMPTY_HASH = hash(range(0, 0)) - - def __init__(self, *args): - argc = len(args) - if argc == 1: - (self._stop,) = args - self._start = type(self._stop)(0) - self._step = type(self._stop - self._start)(1) - elif argc == 2: - self._start, self._stop = args - self._step = type(self._stop - self._start)(1) - elif argc == 3: - self._start, self._stop, self._step = args - elif argc == 0: - raise TypeError( - 'numeric_range expected at least ' - '1 argument, got {}'.format(argc) - ) - else: - raise TypeError( - 'numeric_range expected at most ' - '3 arguments, got {}'.format(argc) - ) - - self._zero = type(self._step)(0) - if self._step == self._zero: - raise ValueError('numeric_range() arg 3 must not be zero') - self._growing = self._step > self._zero - self._init_len() - - def __bool__(self): - if self._growing: - return self._start < self._stop - else: - return self._start > self._stop - - def __contains__(self, elem): - if self._growing: - if self._start <= elem < self._stop: - return (elem - self._start) % self._step == self._zero - else: - if self._start >= elem > self._stop: - return (self._start - elem) % (-self._step) == self._zero - - return False - - def __eq__(self, other): - if isinstance(other, numeric_range): - empty_self = not bool(self) - empty_other = not bool(other) - if empty_self or empty_other: - return empty_self and empty_other # True if both empty - else: - return ( - self._start == other._start - and self._step == other._step - and self._get_by_index(-1) == other._get_by_index(-1) - ) - else: - return False - - def __getitem__(self, key): - if isinstance(key, int): - return self._get_by_index(key) - elif isinstance(key, slice): - step = self._step if key.step is None else key.step * self._step - - if key.start is None or key.start <= -self._len: - start = self._start - elif key.start >= self._len: - start = self._stop - else: # -self._len < key.start < self._len - start = self._get_by_index(key.start) - - if key.stop is None or key.stop >= self._len: - stop = self._stop - elif key.stop <= -self._len: - stop = self._start - else: # -self._len < key.stop < self._len - stop = self._get_by_index(key.stop) - - return numeric_range(start, stop, step) - else: - raise TypeError( - 'numeric range indices must be ' - 'integers or slices, not {}'.format(type(key).__name__) - ) - - def __hash__(self): - if self: - return hash((self._start, self._get_by_index(-1), self._step)) - else: - return self._EMPTY_HASH - - def __iter__(self): - values = (self._start + (n * self._step) for n in count()) - if self._growing: - return takewhile(partial(gt, self._stop), values) - else: - return takewhile(partial(lt, self._stop), values) - - def __len__(self): - return self._len - - def _init_len(self): - if self._growing: - start = self._start - stop = self._stop - step = self._step - else: - start = self._stop - stop = self._start - step = -self._step - distance = stop - start - if distance <= self._zero: - self._len = 0 - else: # distance > 0 and step > 0: regular euclidean division - q, r = divmod(distance, step) - self._len = int(q) + int(r != 
self._zero) - - def __reduce__(self): - return numeric_range, (self._start, self._stop, self._step) - - def __repr__(self): - if self._step == 1: - return "numeric_range({}, {})".format( - repr(self._start), repr(self._stop) - ) - else: - return "numeric_range({}, {}, {})".format( - repr(self._start), repr(self._stop), repr(self._step) - ) - - def __reversed__(self): - return iter( - numeric_range( - self._get_by_index(-1), self._start - self._step, -self._step - ) - ) - - def count(self, value): - return int(value in self) - - def index(self, value): - if self._growing: - if self._start <= value < self._stop: - q, r = divmod(value - self._start, self._step) - if r == self._zero: - return int(q) - else: - if self._start >= value > self._stop: - q, r = divmod(self._start - value, -self._step) - if r == self._zero: - return int(q) - - raise ValueError("{} is not in numeric range".format(value)) - - def _get_by_index(self, i): - if i < 0: - i += self._len - if i < 0 or i >= self._len: - raise IndexError("numeric range object index out of range") - return self._start + i * self._step - - -def count_cycle(iterable, n=None): - """Cycle through the items from *iterable* up to *n* times, yielding - the number of completed cycles along with each item. If *n* is omitted the - process repeats indefinitely. - - >>> list(count_cycle('AB', 3)) - [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] - - """ - iterable = tuple(iterable) - if not iterable: - return iter(()) - counter = count() if n is None else range(n) - return ((i, item) for i in counter for item in iterable) - - -def mark_ends(iterable): - """Yield 3-tuples of the form ``(is_first, is_last, item)``. - - >>> list(mark_ends('ABC')) - [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] - - Use this when looping over an iterable to take special action on its first - and/or last items: - - >>> iterable = ['Header', 100, 200, 'Footer'] - >>> total = 0 - >>> for is_first, is_last, item in mark_ends(iterable): - ... if is_first: - ... continue # Skip the header - ... if is_last: - ... continue # Skip the footer - ... total += item - >>> print(total) - 300 - """ - it = iter(iterable) - - try: - b = next(it) - except StopIteration: - return - - try: - for i in count(): - a = b - b = next(it) - yield i == 0, False, a - - except StopIteration: - yield i == 0, True, a - - -def locate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(locate([0, 1, 1, 0, 1, 0, 0])) - [1, 2, 4] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item. - - >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) - [1, 3] - - If *window_size* is given, then the *pred* function will be called with - that many items. 
-    This enables searching for sub-sequences:
-
-    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
-    >>> pred = lambda *args: args == (1, 2, 3)
-    >>> list(locate(iterable, pred=pred, window_size=3))
-    [1, 5, 9]
-
-    Use with :func:`seekable` to find indexes and then retrieve the associated
-    items:
-
-    >>> from itertools import count
-    >>> from more_itertools import seekable
-    >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
-    >>> it = seekable(source)
-    >>> pred = lambda x: x > 100
-    >>> indexes = locate(it, pred=pred)
-    >>> i = next(indexes)
-    >>> it.seek(i)
-    >>> next(it)
-    106
-
-    """
-    if window_size is None:
-        return compress(count(), map(pred, iterable))
-
-    if window_size < 1:
-        raise ValueError('window size must be at least 1')
-
-    it = windowed(iterable, window_size, fillvalue=_marker)
-    return compress(count(), starmap(pred, it))
-
-
-def lstrip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the beginning
-    for which *pred* returns ``True``.
-
-    For example, to remove a set of items from the start of an iterable:
-
-    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-    >>> pred = lambda x: x in {None, False, ''}
-    >>> list(lstrip(iterable, pred))
-    [1, 2, None, 3, False, None]
-
-    This function is analogous to :func:`str.lstrip`, and is essentially
-    a wrapper for :func:`itertools.dropwhile`.
-
-    """
-    return dropwhile(pred, iterable)
-
-
-def rstrip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the end
-    for which *pred* returns ``True``.
-
-    For example, to remove a set of items from the end of an iterable:
-
-    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-    >>> pred = lambda x: x in {None, False, ''}
-    >>> list(rstrip(iterable, pred))
-    [None, False, None, 1, 2, None, 3]
-
-    This function is analogous to :func:`str.rstrip`.
-
-    """
-    cache = []
-    cache_append = cache.append
-    cache_clear = cache.clear
-    for x in iterable:
-        if pred(x):
-            cache_append(x)
-        else:
-            yield from cache
-            cache_clear()
-            yield x
-
-
-def strip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the
-    beginning and end for which *pred* returns ``True``.
-
-    For example, to remove a set of items from both ends of an iterable:
-
-    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-    >>> pred = lambda x: x in {None, False, ''}
-    >>> list(strip(iterable, pred))
-    [1, 2, None, 3]
-
-    This function is analogous to :func:`str.strip`.
-
-    """
-    return rstrip(lstrip(iterable, pred), pred)
-
-
-class islice_extended:
-    """An extension of :func:`itertools.islice` that supports negative values
-    for *stop*, *start*, and *step*.
-
-    >>> iterable = iter('abcdefgh')
-    >>> list(islice_extended(iterable, -4, -1))
-    ['e', 'f', 'g']
-
-    Slices with negative values require some caching of *iterable*, but this
-    function takes care to minimize the amount of memory required.
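-
-    As a quick sketch, a negative *start* against a finite iterator:
-
-    >>> list(islice_extended(iter('abcdefgh'), -3, None))
-    ['f', 'g', 'h']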
- - For example, you can use a negative step with an infinite iterator: - - >>> from itertools import count - >>> list(islice_extended(count(), 110, 99, -2)) - [110, 108, 106, 104, 102, 100] - - You can also use slice notation directly: - - >>> iterable = map(str, count()) - >>> it = islice_extended(iterable)[10:20:2] - >>> list(it) - ['10', '12', '14', '16', '18'] - - """ - - def __init__(self, iterable, *args): - it = iter(iterable) - if args: - self._iterable = _islice_helper(it, slice(*args)) - else: - self._iterable = it - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterable) - - def __getitem__(self, key): - if isinstance(key, slice): - return islice_extended(_islice_helper(self._iterable, key)) - - raise TypeError('islice_extended.__getitem__ argument must be a slice') - - -def _islice_helper(it, s): - start = s.start - stop = s.stop - if s.step == 0: - raise ValueError('step argument must be a non-zero integer or None.') - step = s.step or 1 - - if step > 0: - start = 0 if (start is None) else start - - if start < 0: - # Consume all but the last -start items - cache = deque(enumerate(it, 1), maxlen=-start) - len_iter = cache[-1][0] if cache else 0 - - # Adjust start to be positive - i = max(len_iter + start, 0) - - # Adjust stop to be positive - if stop is None: - j = len_iter - elif stop >= 0: - j = min(stop, len_iter) - else: - j = max(len_iter + stop, 0) - - # Slice the cache - n = j - i - if n <= 0: - return - - for index, item in islice(cache, 0, n, step): - yield item - elif (stop is not None) and (stop < 0): - # Advance to the start position - next(islice(it, start, start), None) - - # When stop is negative, we have to carry -stop items while - # iterating - cache = deque(islice(it, -stop), maxlen=-stop) - - for index, item in enumerate(it): - cached_item = cache.popleft() - if index % step == 0: - yield cached_item - cache.append(item) - else: - # When both start and stop are positive we have the normal case - yield from islice(it, start, stop, step) - else: - start = -1 if (start is None) else start - - if (stop is not None) and (stop < 0): - # Consume all but the last items - n = -stop - 1 - cache = deque(enumerate(it, 1), maxlen=n) - len_iter = cache[-1][0] if cache else 0 - - # If start and stop are both negative they are comparable and - # we can just slice. Otherwise we can adjust start to be negative - # and then slice. - if start < 0: - i, j = start, stop - else: - i, j = min(start - len_iter, -1), None - - for index, item in list(cache)[i:j:step]: - yield item - else: - # Advance to the stop position - if stop is not None: - m = stop + 1 - next(islice(it, m, m), None) - - # stop is positive, so if start is negative they are not comparable - # and we need the rest of the items. - if start < 0: - i = start - n = None - # stop is None and start is positive, so we just need items up to - # the start index. - elif stop is None: - i = None - n = start + 1 - # Both stop and start are positive, so they are comparable. - else: - i = None - n = start - stop - if n <= 0: - return - - cache = list(islice(it, n)) - - yield from cache[i::step] - - -def always_reversible(iterable): - """An extension of :func:`reversed` that supports all iterables, not - just those which implement the ``Reversible`` or ``Sequence`` protocols. - - >>> print(*always_reversible(x for x in range(3))) - 2 1 0 - - If the iterable is already reversible, this function returns the - result of :func:`reversed()`. 
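-
-    For instance, a ``range`` is handled directly (a small sketch):
-
-    >>> print(*always_reversible(range(3)))
-    2 1 0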
-    If the iterable is not reversible, this function will cache the
-    remaining items in the iterable and yield them in reverse order, which
-    may require significant storage.
-    """
-    try:
-        return reversed(iterable)
-    except TypeError:
-        return reversed(list(iterable))
-
-
-def consecutive_groups(iterable, ordering=lambda x: x):
-    """Yield groups of consecutive items using :func:`itertools.groupby`.
-    The *ordering* function determines whether two items are adjacent by
-    returning their position.
-
-    By default, the ordering function is the identity function. This is
-    suitable for finding runs of numbers:
-
-    >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
-    >>> for group in consecutive_groups(iterable):
-    ...     print(list(group))
-    [1]
-    [10, 11, 12]
-    [20]
-    [30, 31, 32, 33]
-    [40]
-
-    For finding runs of adjacent letters, try using the :meth:`index` method
-    of a string of letters:
-
-    >>> from string import ascii_lowercase
-    >>> iterable = 'abcdfgilmnop'
-    >>> ordering = ascii_lowercase.index
-    >>> for group in consecutive_groups(iterable, ordering):
-    ...     print(list(group))
-    ['a', 'b', 'c', 'd']
-    ['f', 'g']
-    ['i']
-    ['l', 'm', 'n', 'o', 'p']
-
-    Each group of consecutive items is an iterator that shares its source with
-    *iterable*. When an output group is advanced, the previous group is
-    no longer available unless its elements are copied (e.g., into a ``list``).
-
-    >>> iterable = [1, 2, 11, 12, 21, 22]
-    >>> saved_groups = []
-    >>> for group in consecutive_groups(iterable):
-    ...     saved_groups.append(list(group))  # Copy group elements
-    >>> saved_groups
-    [[1, 2], [11, 12], [21, 22]]
-
-    """
-    for k, g in groupby(
-        enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
-    ):
-        yield map(itemgetter(1), g)
-
-
-def difference(iterable, func=sub, *, initial=None):
-    """This function is the inverse of :func:`itertools.accumulate`. By default
-    it will compute the first difference of *iterable* using
-    :func:`operator.sub`:
-
-    >>> from itertools import accumulate
-    >>> iterable = accumulate([0, 1, 2, 3, 4])  # produces 0, 1, 3, 6, 10
-    >>> list(difference(iterable))
-    [0, 1, 2, 3, 4]
-
-    *func* defaults to :func:`operator.sub`, but other functions can be
-    specified. They will be applied as follows::
-
-        A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
-
-    For example, to do progressive division:
-
-    >>> iterable = [1, 2, 6, 24, 120]
-    >>> func = lambda x, y: x // y
-    >>> list(difference(iterable, func))
-    [1, 2, 3, 4, 5]
-
-    If the *initial* keyword is set, the first element will be skipped when
-    computing successive differences.
-
-    >>> it = [10, 11, 13, 16]  # from accumulate([1, 2, 3], initial=10)
-    >>> list(difference(it, initial=10))
-    [1, 2, 3]
-
-    """
-    a, b = tee(iterable)
-    try:
-        first = [next(b)]
-    except StopIteration:
-        return iter([])
-
-    if initial is not None:
-        first = []
-
-    return chain(first, starmap(func, zip(b, a)))
-
-
-class SequenceView(Sequence):
-    """Return a read-only view of the sequence object *target*.
-
-    :class:`SequenceView` objects are analogous to Python's built-in
-    "dictionary view" types. They provide a dynamic view of a sequence's items,
-    meaning that when the sequence updates, so does the view.
-
-    >>> seq = ['0', '1', '2']
-    >>> view = SequenceView(seq)
-    >>> view
-    SequenceView(['0', '1', '2'])
-    >>> seq.append('3')
-    >>> view
-    SequenceView(['0', '1', '2', '3'])
-
-    Sequence views support indexing, slicing, and length queries.
They act - like the underlying sequence, except they don't allow assignment: - - >>> view[1] - '1' - >>> view[1:-1] - ['1', '2'] - >>> len(view) - 4 - - Sequence views are useful as an alternative to copying, as they don't - require (much) extra storage. - - """ - - def __init__(self, target): - if not isinstance(target, Sequence): - raise TypeError - self._target = target - - def __getitem__(self, index): - return self._target[index] - - def __len__(self): - return len(self._target) - - def __repr__(self): - return '{}({})'.format(self.__class__.__name__, repr(self._target)) - - -class seekable: - """Wrap an iterator to allow for seeking backward and forward. This - progressively caches the items in the source iterable so they can be - re-visited. - - Call :meth:`seek` with an index to seek to that position in the source - iterable. - - To "reset" an iterator, seek to ``0``: - - >>> from itertools import count - >>> it = seekable((str(n) for n in count())) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> it.seek(0) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> next(it) - '3' - - You can also seek forward: - - >>> it = seekable((str(n) for n in range(20))) - >>> it.seek(10) - >>> next(it) - '10' - >>> it.seek(20) # Seeking past the end of the source isn't a problem - >>> list(it) - [] - >>> it.seek(0) # Resetting works even after hitting the end - >>> next(it), next(it), next(it) - ('0', '1', '2') - - Call :meth:`peek` to look ahead one item without advancing the iterator: - - >>> it = seekable('1234') - >>> it.peek() - '1' - >>> list(it) - ['1', '2', '3', '4'] - >>> it.peek(default='empty') - 'empty' - - Before the iterator is at its end, calling :func:`bool` on it will return - ``True``. After it will return ``False``: - - >>> it = seekable('5678') - >>> bool(it) - True - >>> list(it) - ['5', '6', '7', '8'] - >>> bool(it) - False - - You may view the contents of the cache with the :meth:`elements` method. - That returns a :class:`SequenceView`, a view that updates automatically: - - >>> it = seekable((str(n) for n in range(10))) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> elements = it.elements() - >>> elements - SequenceView(['0', '1', '2']) - >>> next(it) - '3' - >>> elements - SequenceView(['0', '1', '2', '3']) - - By default, the cache grows as the source iterable progresses, so beware of - wrapping very large or infinite iterables. Supply *maxlen* to limit the - size of the cache (this of course limits how far back you can seek). 
- - >>> from itertools import count - >>> it = seekable((str(n) for n in count()), maxlen=2) - >>> next(it), next(it), next(it), next(it) - ('0', '1', '2', '3') - >>> list(it.elements()) - ['2', '3'] - >>> it.seek(0) - >>> next(it), next(it), next(it), next(it) - ('2', '3', '4', '5') - >>> next(it) - '6' - - """ - - def __init__(self, iterable, maxlen=None): - self._source = iter(iterable) - if maxlen is None: - self._cache = [] - else: - self._cache = deque([], maxlen) - self._index = None - - def __iter__(self): - return self - - def __next__(self): - if self._index is not None: - try: - item = self._cache[self._index] - except IndexError: - self._index = None - else: - self._index += 1 - return item - - item = next(self._source) - self._cache.append(item) - return item - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - try: - peeked = next(self) - except StopIteration: - if default is _marker: - raise - return default - if self._index is None: - self._index = len(self._cache) - self._index -= 1 - return peeked - - def elements(self): - return SequenceView(self._cache) - - def seek(self, index): - self._index = index - remainder = index - len(self._cache) - if remainder > 0: - consume(self, remainder) - - -class run_length: - """ - :func:`run_length.encode` compresses an iterable with run-length encoding. - It yields groups of repeated items with the count of how many times they - were repeated: - - >>> uncompressed = 'abbcccdddd' - >>> list(run_length.encode(uncompressed)) - [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - - :func:`run_length.decode` decompresses an iterable that was previously - compressed with run-length encoding. It yields the items of the - decompressed iterable: - - >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> list(run_length.decode(compressed)) - ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] - - """ - - @staticmethod - def encode(iterable): - return ((k, ilen(g)) for k, g in groupby(iterable)) - - @staticmethod - def decode(iterable): - return chain.from_iterable(repeat(k, n) for k, n in iterable) - - -def exactly_n(iterable, n, predicate=bool): - """Return ``True`` if exactly ``n`` items in the iterable are ``True`` - according to the *predicate* function. - - >>> exactly_n([True, True, False], 2) - True - >>> exactly_n([True, True, False], 1) - False - >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) - True - - The iterable will be advanced until ``n + 1`` truthy items are encountered, - so avoid calling it on infinite iterables. - - """ - return len(take(n + 1, filter(predicate, iterable))) == n - - -def circular_shifts(iterable): - """Return a list of circular shifts of *iterable*. - - >>> circular_shifts(range(4)) - [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] - """ - lst = list(iterable) - return take(len(lst), windowed(cycle(lst), len(lst))) - - -def make_decorator(wrapping_func, result_index=0): - """Return a decorator version of *wrapping_func*, which is a function that - modifies an iterable. *result_index* is the position in that function's - signature where the iterable goes. - - This lets you use itertools on the "production end," i.e. at function - definition. This can augment what the function returns without changing the - function's code. - - For example, to produce a decorator version of :func:`chunked`: - - >>> from more_itertools import chunked - >>> chunker = make_decorator(chunked, result_index=0) - >>> @chunker(3) - ... 
def iter_range(n): - ... return iter(range(n)) - ... - >>> list(iter_range(9)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - - To only allow truthy items to be returned: - - >>> truth_serum = make_decorator(filter, result_index=1) - >>> @truth_serum(bool) - ... def boolean_test(): - ... return [0, 1, '', ' ', False, True] - ... - >>> list(boolean_test()) - [1, ' ', True] - - The :func:`peekable` and :func:`seekable` wrappers make for practical - decorators: - - >>> from more_itertools import peekable - >>> peekable_function = make_decorator(peekable) - >>> @peekable_function() - ... def str_range(*args): - ... return (str(x) for x in range(*args)) - ... - >>> it = str_range(1, 20, 2) - >>> next(it), next(it), next(it) - ('1', '3', '5') - >>> it.peek() - '7' - >>> next(it) - '7' - - """ - # See https://sites.google.com/site/bbayles/index/decorator_factory for - # notes on how this works. - def decorator(*wrapping_args, **wrapping_kwargs): - def outer_wrapper(f): - def inner_wrapper(*args, **kwargs): - result = f(*args, **kwargs) - wrapping_args_ = list(wrapping_args) - wrapping_args_.insert(result_index, result) - return wrapping_func(*wrapping_args_, **wrapping_kwargs) - - return inner_wrapper - - return outer_wrapper - - return decorator - - -def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): - """Return a dictionary that maps the items in *iterable* to categories - defined by *keyfunc*, transforms them with *valuefunc*, and - then summarizes them by category with *reducefunc*. - - *valuefunc* defaults to the identity function if it is unspecified. - If *reducefunc* is unspecified, no summarization takes place: - - >>> keyfunc = lambda x: x.upper() - >>> result = map_reduce('abbccc', keyfunc) - >>> sorted(result.items()) - [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] - - Specifying *valuefunc* transforms the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> result = map_reduce('abbccc', keyfunc, valuefunc) - >>> sorted(result.items()) - [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] - - Specifying *reducefunc* summarizes the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> reducefunc = sum - >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) - >>> sorted(result.items()) - [('A', 1), ('B', 2), ('C', 3)] - - You may want to filter the input iterable before applying the map/reduce - procedure: - - >>> all_items = range(30) - >>> items = [x for x in all_items if 10 <= x <= 20] # Filter - >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 - >>> categories = map_reduce(items, keyfunc=keyfunc) - >>> sorted(categories.items()) - [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] - >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) - >>> sorted(summaries.items()) - [(0, 90), (1, 75)] - - Note that all items in the iterable are gathered into a list before the - summarization step, which may require significant storage. - - The returned object is a :obj:`collections.defaultdict` with the - ``default_factory`` set to ``None``, such that it behaves like a normal - dictionary. 
- - """ - valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc - - ret = defaultdict(list) - for item in iterable: - key = keyfunc(item) - value = valuefunc(item) - ret[key].append(value) - - if reducefunc is not None: - for key, value_list in ret.items(): - ret[key] = reducefunc(value_list) - - ret.default_factory = None - return ret - - -def rlocate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``, starting from the right and moving left. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4 - [4, 2, 1] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item: - - >>> iterable = iter('abcb') - >>> pred = lambda x: x == 'b' - >>> list(rlocate(iterable, pred)) - [3, 1] - - If *window_size* is given, then the *pred* function will be called with - that many items. This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(rlocate(iterable, pred=pred, window_size=3)) - [9, 5, 1] - - Beware, this function won't return anything for infinite iterables. - If *iterable* is reversible, ``rlocate`` will reverse it and search from - the right. Otherwise, it will search from the left and return the results - in reverse order. - - See :func:`locate` to for other example applications. - - """ - if window_size is None: - try: - len_iter = len(iterable) - return (len_iter - i - 1 for i in locate(reversed(iterable), pred)) - except TypeError: - pass - - return reversed(list(locate(iterable, pred, window_size))) - - -def replace(iterable, pred, substitutes, count=None, window_size=1): - """Yield the items from *iterable*, replacing the items for which *pred* - returns ``True`` with the items from the iterable *substitutes*. - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1] - >>> pred = lambda x: x == 0 - >>> substitutes = (2, 3) - >>> list(replace(iterable, pred, substitutes)) - [1, 1, 2, 3, 1, 1, 2, 3, 1, 1] - - If *count* is given, the number of replacements will be limited: - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0] - >>> pred = lambda x: x == 0 - >>> substitutes = [None] - >>> list(replace(iterable, pred, substitutes, count=2)) - [1, 1, None, 1, 1, None, 1, 1, 0] - - Use *window_size* to control the number of items passed as arguments to - *pred*. This allows for locating and replacing subsequences. - - >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] - >>> window_size = 3 - >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred - >>> substitutes = [3, 4] # Splice in these items - >>> list(replace(iterable, pred, substitutes, window_size=window_size)) - [3, 4, 5, 3, 4, 5] - - """ - if window_size < 1: - raise ValueError('window_size must be at least 1') - - # Save the substitutes iterable, since it's used more than once - substitutes = tuple(substitutes) - - # Add padding such that the number of windows matches the length of the - # iterable - it = chain(iterable, [_marker] * (window_size - 1)) - windows = windowed(it, window_size) - - n = 0 - for w in windows: - # If the current window matches our predicate (and we haven't hit - # our maximum number of replacements), splice in the substitutes - # and then consume the following windows that overlap with this one. - # For example, if the iterable is (0, 1, 2, 3, 4...) - # and the window size is 2, we have (0, 1), (1, 2), (2, 3)... 
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) - if pred(*w): - if (count is None) or (n < count): - n += 1 - yield from substitutes - consume(windows, window_size - 1) - continue - - # If there was no match (or we've reached the replacement limit), - # yield the first item from the window. - if w and (w[0] is not _marker): - yield w[0] - - -def partitions(iterable): - """Yield all possible order-preserving partitions of *iterable*. - - >>> iterable = 'abc' - >>> for part in partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['a', 'b', 'c'] - - This is unrelated to :func:`partition`. - - """ - sequence = list(iterable) - n = len(sequence) - for i in powerset(range(1, n)): - yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] - - -def set_partitions(iterable, k=None): - """ - Yield the set partitions of *iterable* into *k* parts. Set partitions are - not order-preserving. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable, 2): - ... print([''.join(p) for p in part]) - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - - - If *k* is not given, every set partition is generated. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - ['a', 'b', 'c'] - - """ - L = list(iterable) - n = len(L) - if k is not None: - if k < 1: - raise ValueError( - "Can't partition in a negative or zero number of groups" - ) - elif k > n: - return - - def set_partitions_helper(L, k): - n = len(L) - if k == 1: - yield [L] - elif n == k: - yield [[s] for s in L] - else: - e, *M = L - for p in set_partitions_helper(M, k - 1): - yield [[e], *p] - for p in set_partitions_helper(M, k): - for i in range(len(p)): - yield p[:i] + [[e] + p[i]] + p[i + 1 :] - - if k is None: - for k in range(1, n + 1): - yield from set_partitions_helper(L, k) - else: - yield from set_partitions_helper(L, k) - - -class time_limited: - """ - Yield items from *iterable* until *limit_seconds* have passed. - If the time limit expires before all items have been yielded, the - ``timed_out`` parameter will be set to ``True``. - - >>> from time import sleep - >>> def generator(): - ... yield 1 - ... yield 2 - ... sleep(0.2) - ... yield 3 - >>> iterable = time_limited(0.1, generator()) - >>> list(iterable) - [1, 2] - >>> iterable.timed_out - True - - Note that the time is checked before each item is yielded, and iteration - stops if the time elapsed is greater than *limit_seconds*. If your time - limit is 1 second, but it takes 2 seconds to generate the first item from - the iterable, the function will run for 2 seconds and not yield anything. - - """ - - def __init__(self, limit_seconds, iterable): - if limit_seconds < 0: - raise ValueError('limit_seconds must be positive') - self.limit_seconds = limit_seconds - self._iterable = iter(iterable) - self._start_time = monotonic() - self.timed_out = False - - def __iter__(self): - return self - - def __next__(self): - item = next(self._iterable) - if monotonic() - self._start_time > self.limit_seconds: - self.timed_out = True - raise StopIteration - - return item - - -def only(iterable, default=None, too_long=None): - """If *iterable* has only one item, return it. - If it has zero items, return *default*. - If it has more than one item, raise the exception given by *too_long*, - which is ``ValueError`` by default. 
- - >>> only([], default='missing') - 'missing' - >>> only([1]) - 1 - >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 1, 2, - and perhaps more.' - >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError - - Note that :func:`only` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check - iterable contents less destructively. - """ - it = iter(iterable) - first_value = next(it, default) - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def ichunked(iterable, n): - """Break *iterable* into sub-iterables with *n* elements each. - :func:`ichunked` is like :func:`chunked`, but it yields iterables - instead of lists. - - If the sub-iterables are read in order, the elements of *iterable* - won't be stored in memory. - If they are read out of order, :func:`itertools.tee` is used to cache - elements as necessary. - - >>> from itertools import count - >>> all_chunks = ichunked(count(), 4) - >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks) - >>> list(c_2) # c_1's elements have been cached; c_3's haven't been - [4, 5, 6, 7] - >>> list(c_1) - [0, 1, 2, 3] - >>> list(c_3) - [8, 9, 10, 11] - - """ - source = iter(iterable) - - while True: - # Check to see whether we're at the end of the source iterable - item = next(source, _marker) - if item is _marker: - return - - # Clone the source and yield an n-length slice - source, it = tee(chain([item], source)) - yield islice(it, n) - - # Advance the source iterable - consume(source, n) - - -def distinct_combinations(iterable, r): - """Yield the distinct combinations of *r* items taken from *iterable*. - - >>> list(distinct_combinations([0, 0, 1], 2)) - [(0, 0), (0, 1)] - - Equivalent to ``set(combinations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - """ - if r < 0: - raise ValueError('r must be non-negative') - elif r == 0: - yield () - return - pool = tuple(iterable) - generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] - current_combo = [None] * r - level = 0 - while generators: - try: - cur_idx, p = next(generators[-1]) - except StopIteration: - generators.pop() - level -= 1 - continue - current_combo[level] = p - if level + 1 == r: - yield tuple(current_combo) - else: - generators.append( - unique_everseen( - enumerate(pool[cur_idx + 1 :], cur_idx + 1), - key=itemgetter(1), - ) - ) - level += 1 - - -def filter_except(validator, iterable, *exceptions): - """Yield the items from *iterable* for which the *validator* function does - not raise one of the specified *exceptions*. - - *validator* is called for each item in *iterable*. - It should be a function that accepts one argument and raises an exception - if that item is not valid. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(filter_except(int, iterable, ValueError, TypeError)) - ['1', '2', '4'] - - If an exception other than one given by *exceptions* is raised by - *validator*, it is raised like normal. 
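-
-    Another sketch, validating with :func:`float`:
-
-    >>> list(filter_except(float, ['1.5', 'x', '2'], ValueError))
-    ['1.5', '2']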
-    """
-    for item in iterable:
-        try:
-            validator(item)
-        except exceptions:
-            pass
-        else:
-            yield item
-
-
-def map_except(function, iterable, *exceptions):
-    """Transform each item from *iterable* with *function* and yield the
-    result, unless *function* raises one of the specified *exceptions*.
-
-    *function* is called to transform each item in *iterable*.
-    It should accept one argument.
-
-    >>> iterable = ['1', '2', 'three', '4', None]
-    >>> list(map_except(int, iterable, ValueError, TypeError))
-    [1, 2, 4]
-
-    If an exception other than one given by *exceptions* is raised by
-    *function*, it is raised like normal.
-    """
-    for item in iterable:
-        try:
-            yield function(item)
-        except exceptions:
-            pass
-
-
-def _sample_unweighted(iterable, k):
-    # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
-    # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
-
-    # Fill up the reservoir (collection of samples) with the first `k` samples
-    reservoir = take(k, iterable)
-
-    # Generate random number that's the largest in a sample of k U(0,1) numbers
-    # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
-    W = exp(log(random()) / k)
-
-    # The number of elements to skip before changing the reservoir is a random
-    # number with a geometric distribution. Sample it using random() and logs.
-    next_index = k + floor(log(random()) / log(1 - W))
-
-    for index, element in enumerate(iterable, k):
-
-        if index == next_index:
-            reservoir[randrange(k)] = element
-            # The new W is the largest in a sample of k U(0, `old_W`) numbers
-            W *= exp(log(random()) / k)
-            next_index += floor(log(random()) / log(1 - W)) + 1
-
-    return reservoir
-
-
-def _sample_weighted(iterable, k, weights):
-    # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
-    # "Weighted random sampling with a reservoir".
-
-    # Log-transform for numerical stability for weights that are small/large
-    weight_keys = (log(random()) / weight for weight in weights)
-
-    # Fill up the reservoir (collection of samples) with the first `k`
-    # weight-keys and elements, then heapify the list.
-    reservoir = take(k, zip(weight_keys, iterable))
-    heapify(reservoir)
-
-    # The number of jumps before changing the reservoir is a random variable
-    # with an exponential distribution. Sample it using random() and logs.
-    smallest_weight_key, _ = reservoir[0]
-    weights_to_skip = log(random()) / smallest_weight_key
-
-    for weight, element in zip(weights, iterable):
-        if weight >= weights_to_skip:
-            # The notation here is consistent with the paper, but we store
-            # the weight-keys in log-space for better numerical stability.
-            smallest_weight_key, _ = reservoir[0]
-            t_w = exp(weight * smallest_weight_key)
-            r_2 = uniform(t_w, 1)  # generate U(t_w, 1)
-            weight_key = log(r_2) / weight
-            heapreplace(reservoir, (weight_key, element))
-            smallest_weight_key, _ = reservoir[0]
-            weights_to_skip = log(random()) / smallest_weight_key
-        else:
-            weights_to_skip -= weight
-
-    # Equivalent to [element for weight_key, element in sorted(reservoir)]
-    return [heappop(reservoir)[1] for _ in range(k)]
-
-
-def sample(iterable, k, weights=None):
-    """Return a *k*-length list of elements chosen (without replacement)
-    from the *iterable*. Like :func:`random.sample`, but works on iterables
-    of unknown length.
- - >>> iterable = range(100) - >>> sample(iterable, 5) # doctest: +SKIP - [81, 60, 96, 16, 4] - - An iterable with *weights* may also be given: - - >>> iterable = range(100) - >>> weights = (i * i + 1 for i in range(100)) - >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP - [79, 67, 74, 66, 78] - - The algorithm can also be used to generate weighted random permutations. - The relative weight of each item determines the probability that it - appears late in the permutation. - - >>> data = "abcdefgh" - >>> weights = range(1, len(data) + 1) - >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP - ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] - """ - if k == 0: - return [] - - iterable = iter(iterable) - if weights is None: - return _sample_unweighted(iterable, k) - else: - weights = iter(weights) - return _sample_weighted(iterable, k, weights) - - -def is_sorted(iterable, key=None, reverse=False): - """Returns ``True`` if the items of iterable are in sorted order, and - ``False`` otherwise. *key* and *reverse* have the same meaning that they do - in the built-in :func:`sorted` function. - - >>> is_sorted(['1', '2', '3', '4', '5'], key=int) - True - >>> is_sorted([5, 4, 3, 1, 2], reverse=True) - False - - The function returns ``False`` after encountering the first out-of-order - item. If there are no out-of-order items, the iterable is exhausted. - """ - - compare = lt if reverse else gt - it = iterable if (key is None) else map(key, iterable) - return not any(starmap(compare, pairwise(it))) - - -class AbortThread(BaseException): - pass - - -class callback_iter: - """Convert a function that uses callbacks to an iterator. - - Let *func* be a function that takes a `callback` keyword argument. - For example: - - >>> def func(callback=None): - ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]: - ... if callback: - ... callback(i, c) - ... return 4 - - - Use ``with callback_iter(func)`` to get an iterator over the parameters - that are delivered to the callback. - - >>> with callback_iter(func) as it: - ... for args, kwargs in it: - ... print(args) - (1, 'a') - (2, 'b') - (3, 'c') - - The function will be called in a background thread. The ``done`` property - indicates whether it has completed execution. - - >>> it.done - True - - If it completes successfully, its return value will be available - in the ``result`` property. - - >>> it.result - 4 - - Notes: - - * If the function uses some keyword argument besides ``callback``, supply - *callback_kwd*. - * If it finished executing, but raised an exception, accessing the - ``result`` property will raise the same exception. - * If it hasn't finished executing, accessing the ``result`` - property from within the ``with`` block will raise ``RuntimeError``. - * If it hasn't finished executing, accessing the ``result`` property from - outside the ``with`` block will raise a - ``more_itertools.AbortThread`` exception. - * Provide *wait_seconds* to adjust how frequently the it is polled for - output. 
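-
-    A sketch of the *callback_kwd* option, assuming a function whose
-    callback parameter is named ``on_item``:
-
-    >>> def func2(on_item=None):
-    ...     for x in (1, 2):
-    ...         if on_item:
-    ...             on_item(x)
-    >>> with callback_iter(func2, callback_kwd='on_item') as it:
-    ...     for args, kwargs in it:
-    ...         print(args)
-    (1,)
-    (2,)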
- - """ - - def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): - self._func = func - self._callback_kwd = callback_kwd - self._aborted = False - self._future = None - self._wait_seconds = wait_seconds - self._executor = ThreadPoolExecutor(max_workers=1) - self._iterator = self._reader() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self._aborted = True - self._executor.shutdown() - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterator) - - @property - def done(self): - if self._future is None: - return False - return self._future.done() - - @property - def result(self): - if not self.done: - raise RuntimeError('Function has not yet completed') - - return self._future.result() - - def _reader(self): - q = Queue() - - def callback(*args, **kwargs): - if self._aborted: - raise AbortThread('canceled by user') - - q.put((args, kwargs)) - - self._future = self._executor.submit( - self._func, **{self._callback_kwd: callback} - ) - - while True: - try: - item = q.get(timeout=self._wait_seconds) - except Empty: - pass - else: - q.task_done() - yield item - - if self._future.done(): - break - - remaining = [] - while True: - try: - item = q.get_nowait() - except Empty: - break - else: - q.task_done() - remaining.append(item) - q.join() - yield from remaining - - -def windowed_complete(iterable, n): - """ - Yield ``(beginning, middle, end)`` tuples, where: - - * Each ``middle`` has *n* items from *iterable* - * Each ``beginning`` has the items before the ones in ``middle`` - * Each ``end`` has the items after the ones in ``middle`` - - >>> iterable = range(7) - >>> n = 3 - >>> for beginning, middle, end in windowed_complete(iterable, n): - ... print(beginning, middle, end) - () (0, 1, 2) (3, 4, 5, 6) - (0,) (1, 2, 3) (4, 5, 6) - (0, 1) (2, 3, 4) (5, 6) - (0, 1, 2) (3, 4, 5) (6,) - (0, 1, 2, 3) (4, 5, 6) () - - Note that *n* must be at least 0 and most equal to the length of - *iterable*. - - This function will exhaust the iterable and may require significant - storage. - """ - if n < 0: - raise ValueError('n must be >= 0') - - seq = tuple(iterable) - size = len(seq) - - if n > size: - raise ValueError('n must be <= len(seq)') - - for i in range(size - n + 1): - beginning = seq[:i] - middle = seq[i : i + n] - end = seq[i + n :] - yield beginning, middle, end - - -def all_unique(iterable, key=None): - """ - Returns ``True`` if all the elements of *iterable* are unique (no two - elements are equal). - - >>> all_unique('ABCB') - False - - If a *key* function is specified, it will be used to make comparisons. - - >>> all_unique('ABCb') - True - >>> all_unique('ABCb', str.lower) - False - - The function returns as soon as the first non-unique element is - encountered. Iterables with a mix of hashable and unhashable items can - be used, but the function will be slower for unhashable items. - """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - for element in map(key, iterable) if key else iterable: - try: - if element in seenset: - return False - seenset_add(element) - except TypeError: - if element in seenlist: - return False - seenlist_add(element) - return True - - -def nth_product(index, *args): - """Equivalent to ``list(product(*args))[index]``. - - The products of *args* can be ordered lexicographically. - :func:`nth_product` computes the product at sort position *index* without - computing the previous products. 
- - >>> nth_product(8, range(2), range(2), range(2), range(2)) - (1, 0, 0, 0) - - ``IndexError`` will be raised if the given *index* is invalid. - """ - pools = list(map(tuple, reversed(args))) - ns = list(map(len, pools)) - - c = reduce(mul, ns) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - result = [] - for pool, n in zip(pools, ns): - result.append(pool[index % n]) - index //= n - - return tuple(reversed(result)) - - -def nth_permutation(iterable, r, index): - """Equivalent to ``list(permutations(iterable, r))[index]``` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`nth_permutation` - computes the subsequence at sort position *index* directly, without - computing the previous subsequences. - - >>> nth_permutation('ghijk', 2, 5) - ('h', 'i') - - ``ValueError`` will be raised If *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. - """ - pool = list(iterable) - n = len(pool) - - if r is None or r == n: - r, c = n, factorial(n) - elif not 0 <= r < n: - raise ValueError - else: - c = factorial(n) // factorial(n - r) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - if c == 0: - return tuple() - - result = [0] * r - q = index * factorial(n) // c if r < n else index - for d in range(1, n + 1): - q, i = divmod(q, d) - if 0 <= n - d < r: - result[n - d] = i - if q == 0: - break - - return tuple(map(pool.pop, result)) - - -def value_chain(*args): - """Yield all arguments passed to the function in the same order in which - they were passed. If an argument itself is iterable then iterate over its - values. - - >>> list(value_chain(1, 2, 3, [4, 5, 6])) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and are emitted - as-is: - - >>> list(value_chain('12', '34', ['56', '78'])) - ['12', '34', '56', '78'] - - - Multiple levels of nesting are not flattened. - - """ - for value in args: - if isinstance(value, (str, bytes)): - yield value - continue - try: - yield from value - except TypeError: - yield value - - -def product_index(element, *args): - """Equivalent to ``list(product(*args)).index(element)`` - - The products of *args* can be ordered lexicographically. - :func:`product_index` computes the first index of *element* without - computing the previous products. - - >>> product_index([8, 2], range(10), range(5)) - 42 - - ``ValueError`` will be raised if the given *element* isn't in the product - of *args*. - """ - index = 0 - - for x, pool in zip_longest(element, args, fillvalue=_marker): - if x is _marker or pool is _marker: - raise ValueError('element is not a product of args') - - pool = tuple(pool) - index = index * len(pool) + pool.index(x) - - return index - - -def combination_index(element, iterable): - """Equivalent to ``list(combinations(iterable, r)).index(element)`` - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`combination_index` computes the index of the - first *element*, without computing the previous combinations. - - >>> combination_index('adf', 'abcdefg') - 10 - - ``ValueError`` will be raised if the given *element* isn't one of the - combinations of *iterable*. 
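-
-    Another sketch, with a two-item combination:
-
-    >>> combination_index('bd', 'abcdef')
-    6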
- """ - element = enumerate(element) - k, y = next(element, (None, None)) - if k is None: - return 0 - - indexes = [] - pool = enumerate(iterable) - for n, x in pool: - if x == y: - indexes.append(n) - tmp, y = next(element, (None, None)) - if tmp is None: - break - else: - k = tmp - else: - raise ValueError('element is not a combination of iterable') - - n, _ = last(pool, default=(n, None)) - - # Python versiosn below 3.8 don't have math.comb - index = 1 - for i, j in enumerate(reversed(indexes), start=1): - j = n - j - if i <= j: - index += factorial(j) // (factorial(i) * factorial(j - i)) - - return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index - - -def permutation_index(element, iterable): - """Equivalent to ``list(permutations(iterable, r)).index(element)``` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`permutation_index` - computes the index of the first *element* directly, without computing - the previous permutations. - - >>> permutation_index([1, 3, 2], range(5)) - 19 - - ``ValueError`` will be raised if the given *element* isn't one of the - permutations of *iterable*. - """ - index = 0 - pool = list(iterable) - for i, x in zip(range(len(pool), -1, -1), element): - r = pool.index(x) - index = index * i + r - del pool[r] - - return index - - -class countable: - """Wrap *iterable* and keep a count of how many items have been consumed. - - The ``items_seen`` attribute starts at ``0`` and increments as the iterable - is consumed: - - >>> iterable = map(str, range(10)) - >>> it = countable(iterable) - >>> it.items_seen - 0 - >>> next(it), next(it) - ('0', '1') - >>> list(it) - ['2', '3', '4', '5', '6', '7', '8', '9'] - >>> it.items_seen - 10 - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self.items_seen = 0 - - def __iter__(self): - return self - - def __next__(self): - item = next(self._it) - self.items_seen += 1 - - return item diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/recipes.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/recipes.py deleted file mode 100755 index 521abd7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/more_itertools/recipes.py +++ /dev/null @@ -1,620 +0,0 @@ -"""Imported from the recipes section of the itertools documentation. - -All functions taken from the recipes section of the itertools library docs -[1]_. -Some backward-compatible usability improvements have been made. - -.. [1] http://docs.python.org/library/itertools.html#recipes - -""" -import warnings -from collections import deque -from itertools import ( - chain, - combinations, - count, - cycle, - groupby, - islice, - repeat, - starmap, - tee, - zip_longest, -) -import operator -from random import randrange, sample, choice - -__all__ = [ - 'all_equal', - 'consume', - 'convolve', - 'dotproduct', - 'first_true', - 'flatten', - 'grouper', - 'iter_except', - 'ncycles', - 'nth', - 'nth_combination', - 'padnone', - 'pad_none', - 'pairwise', - 'partition', - 'powerset', - 'prepend', - 'quantify', - 'random_combination_with_replacement', - 'random_combination', - 'random_permutation', - 'random_product', - 'repeatfunc', - 'roundrobin', - 'tabulate', - 'tail', - 'take', - 'unique_everseen', - 'unique_justseen', -] - - -def take(n, iterable): - """Return first *n* items of the iterable as a list. 
- - >>> take(3, range(10)) - [0, 1, 2] - - If there are fewer than *n* items in the iterable, all of them are - returned. - - >>> take(10, range(3)) - [0, 1, 2] - - """ - return list(islice(iterable, n)) - - -def tabulate(function, start=0): - """Return an iterator over the results of ``func(start)``, - ``func(start + 1)``, ``func(start + 2)``... - - *func* should be a function that accepts one integer argument. - - If *start* is not specified it defaults to 0. It will be incremented each - time the iterator is advanced. - - >>> square = lambda x: x ** 2 - >>> iterator = tabulate(square, -3) - >>> take(4, iterator) - [9, 4, 1, 0] - - """ - return map(function, count(start)) - - -def tail(n, iterable): - """Return an iterator over the last *n* items of *iterable*. - - >>> t = tail(3, 'ABCDEFG') - >>> list(t) - ['E', 'F', 'G'] - - """ - return iter(deque(iterable, maxlen=n)) - - -def consume(iterator, n=None): - """Advance *iterable* by *n* steps. If *n* is ``None``, consume it - entirely. - - Efficiently exhausts an iterator without returning values. Defaults to - consuming the whole iterator, but an optional second argument may be - provided to limit consumption. - - >>> i = (x for x in range(10)) - >>> next(i) - 0 - >>> consume(i, 3) - >>> next(i) - 4 - >>> consume(i) - >>> next(i) - Traceback (most recent call last): - File "", line 1, in - StopIteration - - If the iterator has fewer items remaining than the provided limit, the - whole iterator will be consumed. - - >>> i = (x for x in range(3)) - >>> consume(i, 5) - >>> next(i) - Traceback (most recent call last): - File "", line 1, in - StopIteration - - """ - # Use functions that consume iterators at C speed. - if n is None: - # feed the entire iterator into a zero-length deque - deque(iterator, maxlen=0) - else: - # advance to the empty slice starting at position n - next(islice(iterator, n, n), None) - - -def nth(iterable, n, default=None): - """Returns the nth item or a default value. - - >>> l = range(10) - >>> nth(l, 3) - 3 - >>> nth(l, 20, "zebra") - 'zebra' - - """ - return next(islice(iterable, n, None), default) - - -def all_equal(iterable): - """ - Returns ``True`` if all the elements are equal to each other. - - >>> all_equal('aaaa') - True - >>> all_equal('aaab') - False - - """ - g = groupby(iterable) - return next(g, True) and not next(g, False) - - -def quantify(iterable, pred=bool): - """Return the how many times the predicate is true. - - >>> quantify([True, False, True]) - 2 - - """ - return sum(map(pred, iterable)) - - -def pad_none(iterable): - """Returns the sequence of elements and then returns ``None`` indefinitely. - - >>> take(5, pad_none(range(3))) - [0, 1, 2, None, None] - - Useful for emulating the behavior of the built-in :func:`map` function. - - See also :func:`padded`. - - """ - return chain(iterable, repeat(None)) - - -padnone = pad_none - - -def ncycles(iterable, n): - """Returns the sequence elements *n* times - - >>> list(ncycles(["a", "b"], 3)) - ['a', 'b', 'a', 'b', 'a', 'b'] - - """ - return chain.from_iterable(repeat(tuple(iterable), n)) - - -def dotproduct(vec1, vec2): - """Returns the dot product of the two iterables. - - >>> dotproduct([10, 10], [20, 20]) - 400 - - """ - return sum(map(operator.mul, vec1, vec2)) - - -def flatten(listOfLists): - """Return an iterator flattening one level of nesting in a list of lists. - - >>> list(flatten([[0, 1], [2, 3]])) - [0, 1, 2, 3] - - See also :func:`collapse`, which can flatten multiple levels of nesting. 
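-
-    Note that strings are themselves iterable, so they too are flattened
-    one level (a sketch):
-
-    >>> list(flatten(['ab', 'cd']))
-    ['a', 'b', 'c', 'd']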
- - """ - return chain.from_iterable(listOfLists) - - -def repeatfunc(func, times=None, *args): - """Call *func* with *args* repeatedly, returning an iterable over the - results. - - If *times* is specified, the iterable will terminate after that many - repetitions: - - >>> from operator import add - >>> times = 4 - >>> args = 3, 5 - >>> list(repeatfunc(add, times, *args)) - [8, 8, 8, 8] - - If *times* is ``None`` the iterable will not terminate: - - >>> from random import randrange - >>> times = None - >>> args = 1, 11 - >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP - [2, 4, 8, 1, 8, 4] - - """ - if times is None: - return starmap(func, repeat(args)) - return starmap(func, repeat(args, times)) - - -def _pairwise(iterable): - """Returns an iterator of paired items, overlapping, from the original - - >>> take(4, pairwise(count())) - [(0, 1), (1, 2), (2, 3), (3, 4)] - - On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`. - - """ - a, b = tee(iterable) - next(b, None) - yield from zip(a, b) - - -try: - from itertools import pairwise as itertools_pairwise -except ImportError: - pairwise = _pairwise -else: - - def pairwise(iterable): - yield from itertools_pairwise(iterable) - - pairwise.__doc__ = _pairwise.__doc__ - - -def grouper(iterable, n, fillvalue=None): - """Collect data into fixed-length chunks or blocks. - - >>> list(grouper('ABCDEFG', 3, 'x')) - [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] - - """ - if isinstance(iterable, int): - warnings.warn( - "grouper expects iterable as first parameter", DeprecationWarning - ) - n, iterable = iterable, n - args = [iter(iterable)] * n - return zip_longest(fillvalue=fillvalue, *args) - - -def roundrobin(*iterables): - """Yields an item from each iterable, alternating between them. - - >>> list(roundrobin('ABC', 'D', 'EF')) - ['A', 'D', 'E', 'B', 'F', 'C'] - - This function produces the same output as :func:`interleave_longest`, but - may perform better for some inputs (in particular when the number of - iterables is small). - - """ - # Recipe credited to George Sakkis - pending = len(iterables) - nexts = cycle(iter(it).__next__ for it in iterables) - while pending: - try: - for next in nexts: - yield next() - except StopIteration: - pending -= 1 - nexts = cycle(islice(nexts, pending)) - - -def partition(pred, iterable): - """ - Returns a 2-tuple of iterables derived from the input iterable. - The first yields the items that have ``pred(item) == False``. - The second yields the items that have ``pred(item) == True``. - - >>> is_odd = lambda x: x % 2 != 0 - >>> iterable = range(10) - >>> even_items, odd_items = partition(is_odd, iterable) - >>> list(even_items), list(odd_items) - ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) - - If *pred* is None, :func:`bool` is used. - - >>> iterable = [0, 1, False, True, '', ' '] - >>> false_items, true_items = partition(None, iterable) - >>> list(false_items), list(true_items) - ([0, False, ''], [1, True, ' ']) - - """ - if pred is None: - pred = bool - - evaluations = ((pred(x), x) for x in iterable) - t1, t2 = tee(evaluations) - return ( - (x for (cond, x) in t1 if not cond), - (x for (cond, x) in t2 if cond), - ) - - -def powerset(iterable): - """Yields all possible subsets of the iterable. - - >>> list(powerset([1, 2, 3])) - [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] - - :func:`powerset` will operate on iterables that aren't :class:`set` - instances, so repeated elements in the input will produce repeated elements - in the output. 
Use :func:`unique_everseen` on the input to avoid generating
-    duplicates:
-
-        >>> seq = [1, 1, 0]
-        >>> list(powerset(seq))
-        [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
-        >>> from more_itertools import unique_everseen
-        >>> list(powerset(unique_everseen(seq)))
-        [(), (1,), (0,), (1, 0)]
-
-    """
-    s = list(iterable)
-    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
-
-
-def unique_everseen(iterable, key=None):
-    """
-    Yield unique elements, preserving order.
-
-        >>> list(unique_everseen('AAAABBBCCDAABBB'))
-        ['A', 'B', 'C', 'D']
-        >>> list(unique_everseen('ABBCcAD', str.lower))
-        ['A', 'B', 'C', 'D']
-
-    Sequences with a mix of hashable and unhashable items can be used.
-    The function will be slower (i.e., `O(n^2)`) for unhashable items.
-
-    Remember that ``list`` objects are unhashable - you can use the *key*
-    parameter to transform the list to a tuple (which is hashable) to
-    avoid a slowdown.
-
-        >>> iterable = ([1, 2], [2, 3], [1, 2])
-        >>> list(unique_everseen(iterable))  # Slow
-        [[1, 2], [2, 3]]
-        >>> list(unique_everseen(iterable, key=tuple))  # Faster
-        [[1, 2], [2, 3]]
-
-    Similarly, you may want to convert unhashable ``set`` objects with
-    ``key=frozenset``. For ``dict`` objects,
-    ``key=lambda x: frozenset(x.items())`` can be used.
-
-    """
-    seenset = set()
-    seenset_add = seenset.add
-    seenlist = []
-    seenlist_add = seenlist.append
-    use_key = key is not None
-
-    for element in iterable:
-        k = key(element) if use_key else element
-        try:
-            if k not in seenset:
-                seenset_add(k)
-                yield element
-        except TypeError:
-            if k not in seenlist:
-                seenlist_add(k)
-                yield element
-
-
-def unique_justseen(iterable, key=None):
-    """Yields elements in order, ignoring serial duplicates
-
-        >>> list(unique_justseen('AAAABBBCCDAABBB'))
-        ['A', 'B', 'C', 'D', 'A', 'B']
-        >>> list(unique_justseen('ABBCcAD', str.lower))
-        ['A', 'B', 'C', 'A', 'D']
-
-    """
-    return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
-
-
-def iter_except(func, exception, first=None):
-    """Yields results from a function repeatedly until an exception is raised.
-
-    Converts a call-until-exception interface to an iterator interface.
-    Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
-    to end the loop.
-
-        >>> l = [0, 1, 2]
-        >>> list(iter_except(l.pop, IndexError))
-        [2, 1, 0]
-
-    """
-    try:
-        if first is not None:
-            yield first()
-        while 1:
-            yield func()
-    except exception:
-        pass
-
-
-def first_true(iterable, default=None, pred=None):
-    """
-    Returns the first true value in the iterable.
-
-    If no true value is found, returns *default*.
-
-    If *pred* is not None, returns the first item for which
-    ``pred(item) == True``.
-
-        >>> first_true(range(10))
-        1
-        >>> first_true(range(10), pred=lambda x: x > 5)
-        6
-        >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
-        'missing'
-
-    """
-    return next(filter(pred, iterable), default)
-
-
-def random_product(*args, repeat=1):
-    """Draw an item at random from each of the input iterables.
-
-        >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
-        ('c', 3, 'Z')
-
-    If *repeat* is provided as a keyword argument, that many items will be
-    drawn from each iterable.
-
-        >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
-        ('a', 2, 'd', 3)
-
-    This is equivalent to taking a random selection from
-    ``itertools.product(*args, **kwargs)``.
-
-    """
-    pools = [tuple(pool) for pool in args] * repeat
-    return tuple(choice(pool) for pool in pools)
-
-
-def random_permutation(iterable, r=None):
-    """Return a random *r* length permutation of the elements in *iterable*.
-
-    If *r* is not specified or is ``None``, then *r* defaults to the length of
-    *iterable*.
-
-        >>> random_permutation(range(5)) # doctest:+SKIP
-        (3, 4, 0, 1, 2)
-
-    This is equivalent to taking a random selection from
-    ``itertools.permutations(iterable, r)``.
-
-    """
-    pool = tuple(iterable)
-    r = len(pool) if r is None else r
-    return tuple(sample(pool, r))
-
-
-def random_combination(iterable, r):
-    """Return a random *r* length subsequence of the elements in *iterable*.
-
-        >>> random_combination(range(5), 3) # doctest:+SKIP
-        (2, 3, 4)
-
-    This is equivalent to taking a random selection from
-    ``itertools.combinations(iterable, r)``.
-
-    """
-    pool = tuple(iterable)
-    n = len(pool)
-    indices = sorted(sample(range(n), r))
-    return tuple(pool[i] for i in indices)
-
-
-def random_combination_with_replacement(iterable, r):
-    """Return a random *r* length subsequence of elements in *iterable*,
-    allowing individual elements to be repeated.
-
-        >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
-        (0, 0, 1, 2, 2)
-
-    This is equivalent to taking a random selection from
-    ``itertools.combinations_with_replacement(iterable, r)``.
-
-    """
-    pool = tuple(iterable)
-    n = len(pool)
-    indices = sorted(randrange(n) for i in range(r))
-    return tuple(pool[i] for i in indices)
-
-
-def nth_combination(iterable, r, index):
-    """Equivalent to ``list(combinations(iterable, r))[index]``.
-
-    The subsequences of *iterable* that are of length *r* can be ordered
-    lexicographically. :func:`nth_combination` computes the subsequence at
-    sort position *index* directly, without computing the previous
-    subsequences.
-
-        >>> nth_combination(range(5), 3, 5)
-        (0, 3, 4)
-
-    ``ValueError`` will be raised if *r* is negative or greater than the length
-    of *iterable*.
-    ``IndexError`` will be raised if the given *index* is invalid.
-    """
-    pool = tuple(iterable)
-    n = len(pool)
-    if (r < 0) or (r > n):
-        raise ValueError
-
-    c = 1
-    k = min(r, n - r)
-    for i in range(1, k + 1):
-        c = c * (n - k + i) // i
-
-    if index < 0:
-        index += c
-
-    if (index < 0) or (index >= c):
-        raise IndexError
-
-    result = []
-    while r:
-        c, n, r = c * r // n, n - 1, r - 1
-        while index >= c:
-            index -= c
-            c, n = c * (n - r) // n, n - 1
-        result.append(pool[-1 - n])
-
-    return tuple(result)
-
-
-def prepend(value, iterator):
-    """Yield *value*, followed by the elements in *iterator*.
-
-        >>> value = '0'
-        >>> iterator = ['1', '2', '3']
-        >>> list(prepend(value, iterator))
-        ['0', '1', '2', '3']
-
-    To prepend multiple values, see :func:`itertools.chain`
-    or :func:`value_chain`.
-
-    """
-    return chain([value], iterator)
-
-
-def convolve(signal, kernel):
-    """Convolve the iterable *signal* with the iterable *kernel*.
-
-        >>> signal = (1, 2, 3, 4, 5)
-        >>> kernel = [3, 2, 1]
-        >>> list(convolve(signal, kernel))
-        [3, 8, 14, 20, 26, 14, 5]
-
-    Note: the input arguments are not interchangeable, as the *kernel*
-    is immediately consumed and stored.
- - """ - kernel = tuple(kernel)[::-1] - n = len(kernel) - window = deque([0], maxlen=n) * n - for x in chain(signal, repeat(0, n - 1)): - window.append(x) - yield sum(map(operator.mul, kernel, window)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/ordered_set.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/ordered_set.py deleted file mode 100755 index 1487600..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/ordered_set.py +++ /dev/null @@ -1,488 +0,0 @@ -""" -An OrderedSet is a custom MutableSet that remembers its order, so that every -entry has an index that can be looked up. - -Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger, -and released under the MIT license. -""" -import itertools as it -from collections import deque - -try: - # Python 3 - from collections.abc import MutableSet, Sequence -except ImportError: - # Python 2.7 - from collections import MutableSet, Sequence - -SLICE_ALL = slice(None) -__version__ = "3.1" - - -def is_iterable(obj): - """ - Are we being asked to look up a list of things, instead of a single thing? - We check for the `__iter__` attribute so that this can cover types that - don't have to be known by this module, such as NumPy arrays. - - Strings, however, should be considered as atomic values to look up, not - iterables. The same goes for tuples, since they are immutable and therefore - valid entries. - - We don't need to check for the Python 2 `unicode` type, because it doesn't - have an `__iter__` attribute anyway. - """ - return ( - hasattr(obj, "__iter__") - and not isinstance(obj, str) - and not isinstance(obj, tuple) - ) - - -class OrderedSet(MutableSet, Sequence): - """ - An OrderedSet is a custom MutableSet that remembers its order, so that - every entry has an index that can be looked up. - - Example: - >>> OrderedSet([1, 1, 2, 3, 2]) - OrderedSet([1, 2, 3]) - """ - - def __init__(self, iterable=None): - self.items = [] - self.map = {} - if iterable is not None: - self |= iterable - - def __len__(self): - """ - Returns the number of unique elements in the ordered set - - Example: - >>> len(OrderedSet([])) - 0 - >>> len(OrderedSet([1, 2])) - 2 - """ - return len(self.items) - - def __getitem__(self, index): - """ - Get the item at a given index. - - If `index` is a slice, you will get back that slice of items, as a - new OrderedSet. - - If `index` is a list or a similar iterable, you'll get a list of - items corresponding to those indices. This is similar to NumPy's - "fancy indexing". The result is not an OrderedSet because you may ask - for duplicate indices, and the number of elements returned should be - the number of elements asked for. - - Example: - >>> oset = OrderedSet([1, 2, 3]) - >>> oset[1] - 2 - """ - if isinstance(index, slice) and index == SLICE_ALL: - return self.copy() - elif is_iterable(index): - return [self.items[i] for i in index] - elif hasattr(index, "__index__") or isinstance(index, slice): - result = self.items[index] - if isinstance(result, list): - return self.__class__(result) - else: - return result - else: - raise TypeError("Don't know how to index an OrderedSet by %r" % index) - - def copy(self): - """ - Return a shallow copy of this object. 
- - Example: - >>> this = OrderedSet([1, 2, 3]) - >>> other = this.copy() - >>> this == other - True - >>> this is other - False - """ - return self.__class__(self) - - def __getstate__(self): - if len(self) == 0: - # The state can't be an empty list. - # We need to return a truthy value, or else __setstate__ won't be run. - # - # This could have been done more gracefully by always putting the state - # in a tuple, but this way is backwards- and forwards- compatible with - # previous versions of OrderedSet. - return (None,) - else: - return list(self) - - def __setstate__(self, state): - if state == (None,): - self.__init__([]) - else: - self.__init__(state) - - def __contains__(self, key): - """ - Test if the item is in this ordered set - - Example: - >>> 1 in OrderedSet([1, 3, 2]) - True - >>> 5 in OrderedSet([1, 3, 2]) - False - """ - return key in self.map - - def add(self, key): - """ - Add `key` as an item to this OrderedSet, then return its index. - - If `key` is already in the OrderedSet, return the index it already - had. - - Example: - >>> oset = OrderedSet() - >>> oset.append(3) - 0 - >>> print(oset) - OrderedSet([3]) - """ - if key not in self.map: - self.map[key] = len(self.items) - self.items.append(key) - return self.map[key] - - append = add - - def update(self, sequence): - """ - Update the set with the given iterable sequence, then return the index - of the last element inserted. - - Example: - >>> oset = OrderedSet([1, 2, 3]) - >>> oset.update([3, 1, 5, 1, 4]) - 4 - >>> print(oset) - OrderedSet([1, 2, 3, 5, 4]) - """ - item_index = None - try: - for item in sequence: - item_index = self.add(item) - except TypeError: - raise ValueError( - "Argument needs to be an iterable, got %s" % type(sequence) - ) - return item_index - - def index(self, key): - """ - Get the index of a given entry, raising an IndexError if it's not - present. - - `key` can be an iterable of entries that is not a string, in which case - this returns a list of indices. - - Example: - >>> oset = OrderedSet([1, 2, 3]) - >>> oset.index(2) - 1 - """ - if is_iterable(key): - return [self.index(subkey) for subkey in key] - return self.map[key] - - # Provide some compatibility with pd.Index - get_loc = index - get_indexer = index - - def pop(self): - """ - Remove and return the last element from the set. - - Raises KeyError if the set is empty. - - Example: - >>> oset = OrderedSet([1, 2, 3]) - >>> oset.pop() - 3 - """ - if not self.items: - raise KeyError("Set is empty") - - elem = self.items[-1] - del self.items[-1] - del self.map[elem] - return elem - - def discard(self, key): - """ - Remove an element. Do not raise an exception if absent. - - The MutableSet mixin uses this to implement the .remove() method, which - *does* raise an error when asked to remove a non-existent item. - - Example: - >>> oset = OrderedSet([1, 2, 3]) - >>> oset.discard(2) - >>> print(oset) - OrderedSet([1, 3]) - >>> oset.discard(2) - >>> print(oset) - OrderedSet([1, 3]) - """ - if key in self: - i = self.map[key] - del self.items[i] - del self.map[key] - for k, v in self.map.items(): - if v >= i: - self.map[k] = v - 1 - - def clear(self): - """ - Remove all items from this OrderedSet. 
- """ - del self.items[:] - self.map.clear() - - def __iter__(self): - """ - Example: - >>> list(iter(OrderedSet([1, 2, 3]))) - [1, 2, 3] - """ - return iter(self.items) - - def __reversed__(self): - """ - Example: - >>> list(reversed(OrderedSet([1, 2, 3]))) - [3, 2, 1] - """ - return reversed(self.items) - - def __repr__(self): - if not self: - return "%s()" % (self.__class__.__name__,) - return "%s(%r)" % (self.__class__.__name__, list(self)) - - def __eq__(self, other): - """ - Returns true if the containers have the same items. If `other` is a - Sequence, then order is checked, otherwise it is ignored. - - Example: - >>> oset = OrderedSet([1, 3, 2]) - >>> oset == [1, 3, 2] - True - >>> oset == [1, 2, 3] - False - >>> oset == [2, 3] - False - >>> oset == OrderedSet([3, 2, 1]) - False - """ - # In Python 2 deque is not a Sequence, so treat it as one for - # consistent behavior with Python 3. - if isinstance(other, (Sequence, deque)): - # Check that this OrderedSet contains the same elements, in the - # same order, as the other object. - return list(self) == list(other) - try: - other_as_set = set(other) - except TypeError: - # If `other` can't be converted into a set, it's not equal. - return False - else: - return set(self) == other_as_set - - def union(self, *sets): - """ - Combines all unique items. - Each items order is defined by its first appearance. - - Example: - >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0]) - >>> print(oset) - OrderedSet([3, 1, 4, 5, 2, 0]) - >>> oset.union([8, 9]) - OrderedSet([3, 1, 4, 5, 2, 0, 8, 9]) - >>> oset | {10} - OrderedSet([3, 1, 4, 5, 2, 0, 10]) - """ - cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet - containers = map(list, it.chain([self], sets)) - items = it.chain.from_iterable(containers) - return cls(items) - - def __and__(self, other): - # the parent implementation of this is backwards - return self.intersection(other) - - def intersection(self, *sets): - """ - Returns elements in common between all sets. Order is defined only - by the first set. - - Example: - >>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3]) - >>> print(oset) - OrderedSet([1, 2, 3]) - >>> oset.intersection([2, 4, 5], [1, 2, 3, 4]) - OrderedSet([2]) - >>> oset.intersection() - OrderedSet([1, 2, 3]) - """ - cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet - if sets: - common = set.intersection(*map(set, sets)) - items = (item for item in self if item in common) - else: - items = self - return cls(items) - - def difference(self, *sets): - """ - Returns all elements that are in this set but not the others. - - Example: - >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2])) - OrderedSet([1, 3]) - >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3])) - OrderedSet([1]) - >>> OrderedSet([1, 2, 3]) - OrderedSet([2]) - OrderedSet([1, 3]) - >>> OrderedSet([1, 2, 3]).difference() - OrderedSet([1, 2, 3]) - """ - cls = self.__class__ - if sets: - other = set.union(*map(set, sets)) - items = (item for item in self if item not in other) - else: - items = self - return cls(items) - - def issubset(self, other): - """ - Report whether another set contains this set. 
- - Example: - >>> OrderedSet([1, 2, 3]).issubset({1, 2}) - False - >>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4}) - True - >>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5}) - False - """ - if len(self) > len(other): # Fast check for obvious cases - return False - return all(item in other for item in self) - - def issuperset(self, other): - """ - Report whether this set contains another set. - - Example: - >>> OrderedSet([1, 2]).issuperset([1, 2, 3]) - False - >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3}) - True - >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3}) - False - """ - if len(self) < len(other): # Fast check for obvious cases - return False - return all(item in self for item in other) - - def symmetric_difference(self, other): - """ - Return the symmetric difference of two OrderedSets as a new set. - That is, the new set will contain all elements that are in exactly - one of the sets. - - Their order will be preserved, with elements from `self` preceding - elements from `other`. - - Example: - >>> this = OrderedSet([1, 4, 3, 5, 7]) - >>> other = OrderedSet([9, 7, 1, 3, 2]) - >>> this.symmetric_difference(other) - OrderedSet([4, 5, 9, 2]) - """ - cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet - diff1 = cls(self).difference(other) - diff2 = cls(other).difference(self) - return diff1.union(diff2) - - def _update_items(self, items): - """ - Replace the 'items' list of this OrderedSet with a new one, updating - self.map accordingly. - """ - self.items = items - self.map = {item: idx for (idx, item) in enumerate(items)} - - def difference_update(self, *sets): - """ - Update this OrderedSet to remove items from one or more other sets. - - Example: - >>> this = OrderedSet([1, 2, 3]) - >>> this.difference_update(OrderedSet([2, 4])) - >>> print(this) - OrderedSet([1, 3]) - - >>> this = OrderedSet([1, 2, 3, 4, 5]) - >>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6])) - >>> print(this) - OrderedSet([3, 5]) - """ - items_to_remove = set() - for other in sets: - items_to_remove |= set(other) - self._update_items([item for item in self.items if item not in items_to_remove]) - - def intersection_update(self, other): - """ - Update this OrderedSet to keep only items in another set, preserving - their order in this set. - - Example: - >>> this = OrderedSet([1, 4, 3, 5, 7]) - >>> other = OrderedSet([9, 7, 1, 3, 2]) - >>> this.intersection_update(other) - >>> print(this) - OrderedSet([1, 3, 7]) - """ - other = set(other) - self._update_items([item for item in self.items if item in other]) - - def symmetric_difference_update(self, other): - """ - Update this OrderedSet to remove items from another set, then - add items from the other set that were not present in this set. 
- - Example: - >>> this = OrderedSet([1, 4, 3, 5, 7]) - >>> other = OrderedSet([9, 7, 1, 3, 2]) - >>> this.symmetric_difference_update(other) - >>> print(this) - OrderedSet([4, 5, 9, 2]) - """ - items_to_add = [item for item in other if item not in self] - items_to_remove = set(other) - self._update_items( - [item for item in self.items if item not in items_to_remove] + items_to_add - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__about__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__about__.py deleted file mode 100755 index c359122..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__about__.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "21.2" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__init__.py deleted file mode 100755 index 3c50c5d..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -from .__about__ import ( - __author__, - __copyright__, - __email__, - __license__, - __summary__, - __title__, - __uri__, - __version__, -) - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_manylinux.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_manylinux.py deleted file mode 100755 index 4c379aa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_manylinux.py +++ /dev/null @@ -1,301 +0,0 @@ -import collections -import functools -import os -import re -import struct -import sys -import warnings -from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. 
- """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: Tuple[int, ...] = struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> Optional[_ELFFileHeader]: - try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header - - -def _is_linux_armhf() -> bool: - # hard-float ABI can be detected from the ELF header of the running - # process - # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result - - -def _have_compatible_abi(arch: str) -> bool: - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} - - -# If glibc ever changes its major version, we need to know what the last -# minor version was, so we can build the complete list of all versions. -# For now, guess what the highest minor version might be, assume it will -# be 50 for testing. Once this actually happens, update the dictionary -# with the actual value. 
-_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
-
-
-class _GLibCVersion(NamedTuple):
-    major: int
-    minor: int
-
-
-def _glibc_version_string_confstr() -> Optional[str]:
-    """
-    Primary implementation of glibc_version_string using os.confstr.
-    """
-    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
-    # to be broken or missing. This strategy is used in the standard library
-    # platform module.
-    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
-    try:
-        # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
-        version_string = os.confstr("CS_GNU_LIBC_VERSION")
-        assert version_string is not None
-        _, version = version_string.split()
-    except (AssertionError, AttributeError, OSError, ValueError):
-        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
-        return None
-    return version
-
-
-def _glibc_version_string_ctypes() -> Optional[str]:
-    """
-    Fallback implementation of glibc_version_string using ctypes.
-    """
-    try:
-        import ctypes
-    except ImportError:
-        return None
-
-    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
-    # manpage says, "If filename is NULL, then the returned handle is for the
-    # main program". This way we can let the linker do the work to figure out
-    # which libc our process is actually using.
-    #
-    # We must also handle the special case where the executable is not a
-    # dynamically linked executable. This can occur when using musl libc,
-    # for example. In this situation, dlopen() will error, leading to an
-    # OSError. Interestingly, at least in the case of musl, there is no
-    # errno set on the OSError. The single string argument used to construct
-    # OSError comes from libc itself and is therefore not portable to
-    # hard code here. In any case, failure to call dlopen() means we
-    # can't proceed, so we bail on our attempt.
-    try:
-        process_namespace = ctypes.CDLL(None)
-    except OSError:
-        return None
-
-    try:
-        gnu_get_libc_version = process_namespace.gnu_get_libc_version
-    except AttributeError:
-        # Symbol doesn't exist -> therefore, we are not linked to
-        # glibc.
-        return None
-
-    # Call gnu_get_libc_version, which returns a string like "2.5"
-    gnu_get_libc_version.restype = ctypes.c_char_p
-    version_str: str = gnu_get_libc_version()
-    # py2 / py3 compatibility:
-    if not isinstance(version_str, str):
-        version_str = version_str.decode("ascii")
-
-    return version_str
-
-
-def _glibc_version_string() -> Optional[str]:
-    """Returns glibc version string, or None if not using glibc."""
-    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
-
-
-def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
-    """Parse glibc version.
-
-    We use a regexp instead of str.split because we want to discard any
-    random junk that might come after the minor version -- this might happen
-    in patched/forked versions of glibc (e.g. Linaro's version of glibc
-    uses version strings like "2.20-2014.11"). See gh-3588.
- """ - m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) - if not m: - warnings.warn( - "Expected glibc version with 2 components major.minor," - " got: %s" % version_str, - RuntimeWarning, - ) - return -1, -1 - return int(m.group("major")), int(m.group("minor")) - - -@functools.lru_cache() -def _get_glibc_version() -> Tuple[int, int]: - version_str = _glibc_version_string() - if version_str is None: - return (-1, -1) - return _parse_glibc_version(version_str) - - -# From PEP 513, PEP 600 -def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: - sys_glibc = _get_glibc_version() - if sys_glibc < version: - return False - # Check for presence of _manylinux module. - try: - import _manylinux # noqa - except ImportError: - return True - if hasattr(_manylinux, "manylinux_compatible"): - result = _manylinux.manylinux_compatible(version[0], version[1], arch) - if result is not None: - return bool(result) - return True - if version == _GLibCVersion(2, 5): - if hasattr(_manylinux, "manylinux1_compatible"): - return bool(_manylinux.manylinux1_compatible) - if version == _GLibCVersion(2, 12): - if hasattr(_manylinux, "manylinux2010_compatible"): - return bool(_manylinux.manylinux2010_compatible) - if version == _GLibCVersion(2, 17): - if hasattr(_manylinux, "manylinux2014_compatible"): - return bool(_manylinux.manylinux2014_compatible) - return True - - -_LEGACY_MANYLINUX_MAP = { - # CentOS 7 w/ glibc 2.17 (PEP 599) - (2, 17): "manylinux2014", - # CentOS 6 w/ glibc 2.12 (PEP 571) - (2, 12): "manylinux2010", - # CentOS 5 w/ glibc 2.5 (PEP 513) - (2, 5): "manylinux1", -} - - -def platform_tags(linux: str, arch: str) -> Iterator[str]: - if not _have_compatible_abi(arch): - return - # Oldest glibc to be supported regardless of architecture is (2, 17). - too_old_glibc2 = _GLibCVersion(2, 16) - if arch in {"x86_64", "i686"}: - # On x86/i686 also oldest glibc to be supported is (2, 5). - too_old_glibc2 = _GLibCVersion(2, 4) - current_glibc = _GLibCVersion(*_get_glibc_version()) - glibc_max_list = [current_glibc] - # We can assume compatibility across glibc major versions. - # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 - # - # Build a list of maximum glibc versions so that we can - # output the canonical list of all glibc from current_glibc - # down to too_old_glibc2, including all intermediary versions. - for glibc_major in range(current_glibc.major - 1, 1, -1): - glibc_minor = _LAST_GLIBC_MINOR[glibc_major] - glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. 
- if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_musllinux.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_musllinux.py deleted file mode 100755 index 85450fa..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_musllinux.py +++ /dev/null @@ -1,136 +0,0 @@ -"""PEP 656 support. - -This module implements logic to detect if the currently running Python is -linked against musl, and what musl version is used. -""" - -import contextlib -import functools -import operator -import os -import re -import struct -import subprocess -import sys -from typing import IO, Iterator, NamedTuple, Optional, Tuple - - -def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: - return struct.unpack(fmt, f.read(struct.calcsize(fmt))) - - -def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: - """Detect musl libc location by parsing the Python executable. - - Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca - ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html - """ - f.seek(0) - try: - ident = _read_unpacked(f, "16B") - except struct.error: - return None - if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. - return None - f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. - - try: - # e_fmt: Format for program header. - # p_fmt: Format for section header. - # p_idx: Indexes to find p_type, p_offset, and p_filesz. - e_fmt, p_fmt, p_idx = { - 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. - 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. - }[ident[4]] - except KeyError: - return None - else: - p_get = operator.itemgetter(*p_idx) - - # Find the interpreter section and return its content. - try: - _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) - except struct.error: - return None - for i in range(e_phnum + 1): - f.seek(e_phoff + e_phentsize * i) - try: - p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) - except struct.error: - return None - if p_type != 3: # Not PT_INTERP. - continue - f.seek(p_offset) - interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") - if "musl" not in interpreter: - return None - return interpreter - return None - - -class _MuslVersion(NamedTuple): - major: int - minor: int - - -def _parse_musl_version(output: str) -> Optional[_MuslVersion]: - lines = [n for n in (n.strip() for n in output.splitlines()) if n] - if len(lines) < 2 or lines[0][:4] != "musl": - return None - m = re.match(r"Version (\d+)\.(\d+)", lines[1]) - if not m: - return None - return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) - - -@functools.lru_cache() -def _get_musl_version(executable: str) -> Optional[_MuslVersion]: - """Detect currently-running musl runtime version. - - This is done by checking the specified executable's dynamic linking - information, and invoking the loader to parse its output for a version - string. 
If the loader is musl, the output would be something like:: - - musl libc (x86_64) - Version 1.2.2 - Dynamic Program Loader - """ - with contextlib.ExitStack() as stack: - try: - f = stack.enter_context(open(executable, "rb")) - except IOError: - return None - ld = _parse_ld_musl_from_elf(f) - if not ld: - return None - proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) - return _parse_musl_version(proc.stderr) - - -def platform_tags(arch: str) -> Iterator[str]: - """Generate musllinux tags compatible to the current platform. - - :param arch: Should be the part of platform tag after the ``linux_`` - prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a - prerequisite for the current platform to be musllinux-compatible. - - :returns: An iterator of compatible musllinux tags. - """ - sys_musl = _get_musl_version(sys.executable) - if sys_musl is None: # Python not dynamically linked against musl. - return - for minor in range(sys_musl.minor, -1, -1): - yield f"musllinux_{sys_musl.major}_{minor}_{arch}" - - -if __name__ == "__main__": # pragma: no cover - import sysconfig - - plat = sysconfig.get_platform() - assert plat.startswith("linux-"), "not linux" - - print("plat:", plat) - print("musl:", _get_musl_version(sys.executable)) - print("tags:", end=" ") - for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): - print(t, end="\n ") diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_structures.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_structures.py deleted file mode 100755 index 9515497..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/_structures.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
- - -class InfinityType: - def __repr__(self) -> str: - return "Infinity" - - def __hash__(self) -> int: - return hash(repr(self)) - - def __lt__(self, other: object) -> bool: - return False - - def __le__(self, other: object) -> bool: - return False - - def __eq__(self, other: object) -> bool: - return isinstance(other, self.__class__) - - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - - def __gt__(self, other: object) -> bool: - return True - - def __ge__(self, other: object) -> bool: - return True - - def __neg__(self: object) -> "NegativeInfinityType": - return NegativeInfinity - - -Infinity = InfinityType() - - -class NegativeInfinityType: - def __repr__(self) -> str: - return "-Infinity" - - def __hash__(self) -> int: - return hash(repr(self)) - - def __lt__(self, other: object) -> bool: - return True - - def __le__(self, other: object) -> bool: - return True - - def __eq__(self, other: object) -> bool: - return isinstance(other, self.__class__) - - def __ne__(self, other: object) -> bool: - return not isinstance(other, self.__class__) - - def __gt__(self, other: object) -> bool: - return False - - def __ge__(self, other: object) -> bool: - return False - - def __neg__(self: object) -> InfinityType: - return Infinity - - -NegativeInfinity = NegativeInfinityType() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/markers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/markers.py deleted file mode 100755 index eb0541b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/markers.py +++ /dev/null @@ -1,304 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import operator -import os -import platform -import sys -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -from setuptools.extern.pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, -) - -from .specifiers import InvalidSpecifier, Specifier - -__all__ = [ - "InvalidMarker", - "UndefinedComparison", - "UndefinedEnvironmentName", - "Marker", - "default_environment", -] - -Operator = Callable[[str, str], bool] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. 
- """ - - -class Node: - def __init__(self, value: Any) -> None: - self.value = value - - def __str__(self) -> str: - return str(self.value) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker( - marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True -) -> str: - - assert isinstance(marker, (list, tuple, str)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. 
-    if (
-        isinstance(marker, list)
-        and len(marker) == 1
-        and isinstance(marker[0], (list, tuple))
-    ):
-        return _format_marker(marker[0])
-
-    if isinstance(marker, list):
-        inner = (_format_marker(m, first=False) for m in marker)
-        if first:
-            return " ".join(inner)
-        else:
-            return "(" + " ".join(inner) + ")"
-    elif isinstance(marker, tuple):
-        return " ".join([m.serialize() for m in marker])
-    else:
-        return marker
-
-
-_operators: Dict[str, Operator] = {
-    "in": lambda lhs, rhs: lhs in rhs,
-    "not in": lambda lhs, rhs: lhs not in rhs,
-    "<": operator.lt,
-    "<=": operator.le,
-    "==": operator.eq,
-    "!=": operator.ne,
-    ">=": operator.ge,
-    ">": operator.gt,
-}
-
-
-def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
-    try:
-        spec = Specifier("".join([op.serialize(), rhs]))
-    except InvalidSpecifier:
-        pass
-    else:
-        return spec.contains(lhs)
-
-    oper: Optional[Operator] = _operators.get(op.serialize())
-    if oper is None:
-        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
-
-    return oper(lhs, rhs)
-
-
-class Undefined:
-    pass
-
-
-_undefined = Undefined()
-
-
-def _get_env(environment: Dict[str, str], name: str) -> str:
-    value: Union[str, Undefined] = environment.get(name, _undefined)
-
-    if isinstance(value, Undefined):
-        raise UndefinedEnvironmentName(
-            f"{name!r} does not exist in evaluation environment."
-        )
-
-    return value
-
-
-def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
-    groups: List[List[bool]] = [[]]
-
-    for marker in markers:
-        assert isinstance(marker, (list, tuple, str))
-
-        if isinstance(marker, list):
-            groups[-1].append(_evaluate_markers(marker, environment))
-        elif isinstance(marker, tuple):
-            lhs, op, rhs = marker
-
-            if isinstance(lhs, Variable):
-                lhs_value = _get_env(environment, lhs.value)
-                rhs_value = rhs.value
-            else:
-                lhs_value = lhs.value
-                rhs_value = _get_env(environment, rhs.value)
-
-            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
-        else:
-            assert marker in ["and", "or"]
-            if marker == "or":
-                groups.append([])
-
-    return any(all(item) for item in groups)
-
-
-def format_full_version(info: "sys._version_info") -> str:
-    version = "{0.major}.{0.minor}.{0.micro}".format(info)
-    kind = info.releaselevel
-    if kind != "final":
-        version += kind[0] + str(info.serial)
-    return version
-
-
-def default_environment() -> Dict[str, str]:
-    iver = format_full_version(sys.implementation.version)
-    implementation_name = sys.implementation.name
-    return {
-        "implementation_name": implementation_name,
-        "implementation_version": iver,
-        "os_name": os.name,
-        "platform_machine": platform.machine(),
-        "platform_release": platform.release(),
-        "platform_system": platform.system(),
-        "platform_version": platform.version(),
-        "python_full_version": platform.python_version(),
-        "platform_python_implementation": platform.python_implementation(),
-        "python_version": ".".join(platform.python_version_tuple()[:2]),
-        "sys_platform": sys.platform,
-    }
-
-
-class Marker:
-    def __init__(self, marker: str) -> None:
-        try:
-            self._markers = _coerce_parse_result(MARKER.parseString(marker))
-        except ParseException as e:
-            raise InvalidMarker(
-                f"Invalid marker: {marker!r}, parse error at "
-                f"{marker[e.loc : e.loc + 8]!r}"
-            )
-
-    def __str__(self) -> str:
-        return _format_marker(self._markers)
-
-    def __repr__(self) -> str:
-        return f"<Marker('{self}')>"
-
-    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
-        """Evaluate a marker.
-
-        Return the boolean from evaluating the given marker against the
-        environment.
environment is an optional argument to override all or - part of the determined environment. - - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/requirements.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/requirements.py deleted file mode 100755 index 0d93231..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/requirements.py +++ /dev/null @@ -1,146 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -import string -import urllib.parse -from typing import List, Optional as TOptional, Set - -from setuptools.extern.pyparsing import ( # noqa - Combine, - Literal as L, - Optional, - ParseException, - Regex, - Word, - ZeroOrMore, - originalTextFor, - stringEnd, - stringStart, -) - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. - """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# setuptools.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - -class Requirement: - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. Raises InvalidRequirement on a badly-formed requirement - string. 
- """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? Do we need to test against the _name_ of - # the thing as well as the version? What about the markers? - # TODO: Can we normalize the name and extra name? - - def __init__(self, requirement_string: str) -> None: - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' - ) - - self.name: str = req.name - if req.url: - parsed_url = urllib.parse.urlparse(req.url) - if parsed_url.scheme == "file": - if urllib.parse.urlunparse(parsed_url) != req.url: - raise InvalidRequirement("Invalid URL given") - elif not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc - ): - raise InvalidRequirement(f"Invalid URL: {req.url}") - self.url: TOptional[str] = req.url - else: - self.url = None - self.extras: Set[str] = set(req.extras.asList() if req.extras else []) - self.specifier: SpecifierSet = SpecifierSet(req.specifier) - self.marker: TOptional[Marker] = req.marker if req.marker else None - - def __str__(self) -> str: - parts: List[str] = [self.name] - - if self.extras: - formatted_extras = ",".join(sorted(self.extras)) - parts.append(f"[{formatted_extras}]") - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append(f"@ {self.url}") - if self.marker: - parts.append(" ") - - if self.marker: - parts.append(f"; {self.marker}") - - return "".join(parts) - - def __repr__(self) -> str: - return f"" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/specifiers.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/specifiers.py deleted file mode 100755 index ce66bd4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/specifiers.py +++ /dev/null @@ -1,828 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import abc -import functools -import itertools -import re -import warnings -from typing import ( - Callable, - Dict, - Iterable, - Iterator, - List, - Optional, - Pattern, - Set, - Tuple, - TypeVar, - Union, -) - -from .utils import canonicalize_version -from .version import LegacyVersion, Version, parse - -ParsedVersion = Union[Version, LegacyVersion] -UnparsedVersion = Union[Version, LegacyVersion, str] -VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) -CallableOperator = Callable[[ParsedVersion, str], bool] - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(metaclass=abc.ABCMeta): - @abc.abstractmethod - def __str__(self) -> str: - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self) -> int: - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. 
- """ - - @abc.abstractmethod - def __ne__(self, other: object) -> bool: - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ - - @abc.abstractproperty - def prereleases(self) -> Optional[bool]: - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @prereleases.setter - def prereleases(self, value: bool) -> None: - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @abc.abstractmethod - def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. - """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators: Dict[str, str] = {} - _regex: Pattern[str] - - def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier(f"Invalid specifier: '{spec}'") - - self._spec: Tuple[str, str] = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self) -> str: - pre = ( - f", prereleases={self.prereleases!r}" - if self._prereleases is not None - else "" - ) - - return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre) - - def __str__(self) -> str: - return "{}{}".format(*self._spec) - - @property - def _canonical_spec(self) -> Tuple[str, str]: - return self._spec[0], canonicalize_version(self._spec[1]) - - def __hash__(self) -> int: - return hash(self._canonical_spec) - - def __eq__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._canonical_spec == other._canonical_spec - - def __ne__(self, other: object) -> bool: - if isinstance(other, str): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - - def _get_operator(self, op: str) -> CallableOperator: - operator_callable: CallableOperator = getattr( - self, f"_compare_{self._operators[op]}" - ) - return operator_callable - - def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self) -> str: - return self._spec[0] - - @property - def version(self) -> str: - return self._spec[1] - - @property - def prereleases(self) -> Optional[bool]: - return self._prereleases - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: str) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Determine if prereleases are to be allowed or not. 
-        if prereleases is None:
-            prereleases = self.prereleases
-
-        # Normalize item to a Version or LegacyVersion, this allows us to have
-        # a shortcut for ``"2.0" in Specifier(">=2")``
-        normalized_item = self._coerce_version(item)
-
-        # Determine if we should be supporting prereleases in this specifier
-        # or not, if we do not support prereleases than we can short circuit
-        # logic if this version is a prereleases.
-        if normalized_item.is_prerelease and not prereleases:
-            return False
-
-        # Actually do the comparison to determine if this item is contained
-        # within this Specifier or not.
-        operator_callable: CallableOperator = self._get_operator(self.operator)
-        return operator_callable(normalized_item, self.version)
-
-    def filter(
-        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-    ) -> Iterable[VersionTypeVar]:
-
-        yielded = False
-        found_prereleases = []
-
-        kw = {"prereleases": prereleases if prereleases is not None else True}
-
-        # Attempt to iterate over all the values in the iterable and if any of
-        # them match, yield them.
-        for version in iterable:
-            parsed_version = self._coerce_version(version)
-
-            if self.contains(parsed_version, **kw):
-                # If our version is a prerelease, and we were not set to allow
-                # prereleases, then we'll store it for later in case nothing
-                # else matches this specifier.
-                if parsed_version.is_prerelease and not (
-                    prereleases or self.prereleases
-                ):
-                    found_prereleases.append(version)
-                # Either this is not a prerelease, or we should have been
-                # accepting prereleases from the beginning.
-                else:
-                    yielded = True
-                    yield version
-
-        # Now that we've iterated over everything, determine if we've yielded
-        # any values, and if we have not and we have any prereleases stored up
-        # then we will go ahead and yield the prereleases.
-        if not yielded and found_prereleases:
-            for version in found_prereleases:
-                yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
-    _regex_str = r"""
-        (?P<operator>(==|!=|<=|>=|<|>))
-        \s*
-        (?P<version>
-            [^,;\s)]* # Since this is a "legacy" specifier, and the version
-                      # string can be just about anything, we match everything
-                      # except for whitespace, a semi-colon for marker support,
-                      # a closing paren since versions can be enclosed in
-                      # them, and a comma since it's a version separator.
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
-        ">=": "greater_than_equal",
-        "<": "less_than",
-        ">": "greater_than",
-    }
-
-    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
-        super().__init__(spec, prereleases)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
-        if not isinstance(version, LegacyVersion):
-            version = LegacyVersion(str(version))
-        return version
-
-    def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective == self._coerce_version(spec)
-
-    def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective != self._coerce_version(spec)
-
-    def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective <= self._coerce_version(spec)
-
-    def _compare_greater_than_equal(
-        self, prospective: LegacyVersion, spec: str
-    ) -> bool:
-        return prospective >= self._coerce_version(spec)
-
-    def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective < self._coerce_version(spec)
-
-    def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
-    fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
-    @functools.wraps(fn)
-    def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
-        if not isinstance(prospective, Version):
-            return False
-        return fn(self, prospective, spec)
-
-    return wrapped
-
-
-class Specifier(_IndividualSpecifier):
-
-    _regex_str = r"""
-        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
-        (?P<version>
-            (?:
-                # The identity operators allow for an escape hatch that will
-                # do an exact string match of the version you wish to install.
-                # This will not be parsed by PEP 440 and we cannot determine
-                # any semantic meaning from it. This operator is discouraged
-                # but included entirely as an escape hatch.
-                (?<====)  # Only match for the identity operator
-                \s*
-                [^\s]*    # We just match everything, except for whitespace
-                          # since we are only testing for strict identity.
-            )
-            |
-            (?:
-                # The (non)equality operators allow for wild card and local
-                # versions to be specified so we have to define these two
-                # operators separately to enable that.
-                (?<===|!=)            # Only match for equals and not equals
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)*   # release
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-
-                # You cannot use a wild card and a dev or local version
-                # together so group them with a | and make them optional.
-                (?:
-                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
-                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
-                    |
-                    \.\*  # Wild card syntax of .*
-                )?
-            )
-            |
-            (?:
-                # The compatible operator requires at least two digits in the
-                # release segment.
-                (?<=~=)               # Only match for the compatible operator
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
-            )
-            |
-            (?:
-                # All other operators only allow a sub set of what the
-                # (non)equality operators do. Specifically they do not allow
-                # local versions to be specified nor do they allow the prefix
-                # matching wild cards.
-                (?<!==|!=|~=)         # We have special cases for these
-                                      # operators so we want to make sure they
-                                      # don't match here.
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)*   # release
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
-            )
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "~=": "compatible",
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
-        ">=": "greater_than_equal",
-        "<": "less_than",
-        ">": "greater_than",
-        "===": "arbitrary",
-    }
-
-    @_require_version_compare
-    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # Compatible releases have an equivalent combination of >= and ==. That
-        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
-        # implement this in terms of the other specifiers instead of
-        # implementing it ourselves. The only thing we need to do is construct
-        # the other specifiers.
-
-        # We want everything but the last item in the version, but we want to
-        # ignore suffix segments.
-        prefix = ".".join(
-            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
-        )
-
-        # Add the prefix notation to the end of our string
-        prefix += ".*"
-
-        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
-            prospective, prefix
-        )
-
-    @_require_version_compare
-    def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # We need special logic to handle prefix matching
-        if spec.endswith(".*"):
-            # In the case of prefix matching we want to ignore local segment.
-            prospective = Version(prospective.public)
-            # Split the spec out by dots, and pretend that there is an implicit
-            # dot in between a release segment and a pre-release segment.
-            split_spec = _version_split(spec[:-2])  # Remove the trailing .*
-
-            # Split the prospective version out by dots, and pretend that there
-            # is an implicit dot in between a release segment and a pre-release
-            # segment.
-            split_prospective = _version_split(str(prospective))
-
-            # Shorten the prospective version to be the same length as the spec
-            # so that we can determine if the specifier is a prefix of the
-            # prospective version or not.
-            shortened_prospective = split_prospective[: len(split_spec)]
-
-            # Pad out our two sides with zeros so that they both equal the same
-            # length.
-            padded_spec, padded_prospective = _pad_version(
-                split_spec, shortened_prospective
-            )
-
-            return padded_prospective == padded_spec
-        else:
-            # Convert our spec string into a Version
-            spec_version = Version(spec)
-
-            # If the specifier does not have a local segment, then we want to
-            # act as if the prospective version also does not have a local
-            # segment.
-            if not spec_version.local:
-                prospective = Version(prospective.public)
-
-            return prospective == spec_version
-
-    @_require_version_compare
-    def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-        return not self._compare_equal(prospective, spec)
-
-    @_require_version_compare
-    def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # NB: Local version identifiers are NOT permitted in the version
-        # specifier, so local version labels can be universally removed from
-        # the prospective version.
-        return Version(prospective.public) <= Version(spec)
-
-    @_require_version_compare
-    def _compare_greater_than_equal(
-        self, prospective: ParsedVersion, spec: str
-    ) -> bool:
-
-        # NB: Local version identifiers are NOT permitted in the version
-        # specifier, so local version labels can be universally removed from
-        # the prospective version.
- return Version(prospective.public) >= Version(spec) - - @_require_version_compare - def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is less than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective < spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a pre-release version, that we do not accept pre-release - # versions for the version mentioned in the specifier (e.g. <3.1 should - # not match 3.1.dev0, but should match 3.0.dev0). - if not spec.is_prerelease and prospective.is_prerelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # less than the spec version *and* it's not a pre-release of the same - # version in the spec. - return True - - @_require_version_compare - def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: - - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec_str) - - # Check to see if the prospective version is greater than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective > spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a post-release version, that we do not accept - # post-release versions for the version mentioned in the specifier - # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). - if not spec.is_postrelease and prospective.is_postrelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is technically greater than, to match. - if prospective.local is not None: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # greater than the spec version *and* it's not a pre-release of the - # same version in the spec. - return True - - def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: - return str(prospective).lower() == str(spec).lower() - - @property - def prereleases(self) -> bool: - - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases - - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. - if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. 
-        if parse(version).is_prerelease:
-            return True
-
-        return False
-
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        self._prereleases = value
-
-
-_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
-
-
-def _version_split(version: str) -> List[str]:
-    result: List[str] = []
-    for item in version.split("."):
-        match = _prefix_regex.search(item)
-        if match:
-            result.extend(match.groups())
-        else:
-            result.append(item)
-    return result
-
-
-def _is_not_suffix(segment: str) -> bool:
-    return not any(
-        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
-    )
-
-
-def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
-    left_split, right_split = [], []
-
-    # Get the release segment of our versions
-    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
-    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
-
-    # Get the rest of our versions
-    left_split.append(left[len(left_split[0]) :])
-    right_split.append(right[len(right_split[0]) :])
-
-    # Insert our padding
-    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
-    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
-
-    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
-
-
-class SpecifierSet(BaseSpecifier):
-    def __init__(
-        self, specifiers: str = "", prereleases: Optional[bool] = None
-    ) -> None:
-
-        # Split on , to break each individual specifier into it's own item, and
-        # strip each item to remove leading/trailing whitespace.
-        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
-
-        # Parsed each individual specifier, attempting first to make it a
-        # Specifier and falling back to a LegacySpecifier.
-        parsed: Set[_IndividualSpecifier] = set()
-        for specifier in split_specifiers:
-            try:
-                parsed.add(Specifier(specifier))
-            except InvalidSpecifier:
-                parsed.add(LegacySpecifier(specifier))
-
-        # Turn our parsed specifiers into a frozen set and save them for later.
-        self._specs = frozenset(parsed)
-
-        # Store our prereleases value so we can use it later to determine if
-        # we accept prereleases or not.
-        self._prereleases = prereleases
-
-    def __repr__(self) -> str:
-        pre = (
-            f", prereleases={self.prereleases!r}"
-            if self._prereleases is not None
-            else ""
-        )
-
-        return "<SpecifierSet({!r}{})>".format(str(self), pre)
-
-    def __str__(self) -> str:
-        return ",".join(sorted(str(s) for s in self._specs))
-
-    def __hash__(self) -> int:
-        return hash(self._specs)
-
-    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
-        if isinstance(other, str):
-            other = SpecifierSet(other)
-        elif not isinstance(other, SpecifierSet):
-            return NotImplemented
-
-        specifier = SpecifierSet()
-        specifier._specs = frozenset(self._specs | other._specs)
-
-        if self._prereleases is None and other._prereleases is not None:
-            specifier._prereleases = other._prereleases
-        elif self._prereleases is not None and other._prereleases is None:
-            specifier._prereleases = self._prereleases
-        elif self._prereleases == other._prereleases:
-            specifier._prereleases = self._prereleases
-        else:
-            raise ValueError(
-                "Cannot combine SpecifierSets with True and False prerelease "
-                "overrides."
- ) - - return specifier - - def __eq__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __ne__(self, other: object) -> bool: - if isinstance(other, (str, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - - def __len__(self) -> int: - return len(self._specs) - - def __iter__(self) -> Iterator[_IndividualSpecifier]: - return iter(self._specs) - - @property - def prereleases(self) -> Optional[bool]: - - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value: bool) -> None: - self._prereleases = value - - def __contains__(self, item: UnparsedVersion) -> bool: - return self.contains(item) - - def contains( - self, item: UnparsedVersion, prereleases: Optional[bool] = None - ) -> bool: - - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them. If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all(s.contains(item, prereleases=prereleases) for s in self._specs) - - def filter( - self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None - ) -> Iterable[VersionTypeVar]: - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. 
- if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. - else: - filtered: List[VersionTypeVar] = [] - found_prereleases: List[VersionTypeVar] = [] - - item: UnparsedVersion - parsed_version: Union[Version, LegacyVersion] - - for item in iterable: - # Ensure that we some kind of Version class for this item. - if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue - - # Store any item which is a pre-release for later unless we've - # already found a final version or we are accepting prereleases - if parsed_version.is_prerelease and not prereleases: - if not filtered: - found_prereleases.append(item) - else: - filtered.append(item) - - # If we've found no items except for pre-releases, then we'll go - # ahead and use the pre-releases - if not filtered and found_prereleases and prereleases is None: - return found_prereleases - - return filtered diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/tags.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/tags.py deleted file mode 100755 index e65890a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/tags.py +++ /dev/null @@ -1,484 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import logging -import platform -import sys -import sysconfig -from importlib.machinery import EXTENSION_SUFFIXES -from typing import ( - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, - cast, -) - -from . import _manylinux, _musllinux - -logger = logging.getLogger(__name__) - -PythonVersion = Sequence[int] -MacVersion = Tuple[int, int] - -INTERPRETER_SHORT_NAMES: Dict[str, str] = { - "python": "py", # Generic. - "cpython": "cp", - "pypy": "pp", - "ironpython": "ip", - "jython": "jy", -} - - -_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 - - -class Tag: - """ - A representation of the tag triple for a wheel. - - Instances are considered immutable and thus are hashable. Equality checking - is also supported. - """ - - __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] - - def __init__(self, interpreter: str, abi: str, platform: str) -> None: - self._interpreter = interpreter.lower() - self._abi = abi.lower() - self._platform = platform.lower() - # The __hash__ of every single element in a Set[Tag] will be evaluated each time - # that a set calls its `.disjoint()` method, which may be called hundreds of - # times when scanning a page of links for packages with tags matching that - # Set[Tag]. Pre-computing the value here produces significant speedups for - # downstream consumers. 
- self._hash = hash((self._interpreter, self._abi, self._platform)) - - @property - def interpreter(self) -> str: - return self._interpreter - - @property - def abi(self) -> str: - return self._abi - - @property - def platform(self) -> str: - return self._platform - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Tag): - return NotImplemented - - return ( - (self._hash == other._hash) # Short-circuit ASAP for perf reasons. - and (self._platform == other._platform) - and (self._abi == other._abi) - and (self._interpreter == other._interpreter) - ) - - def __hash__(self) -> int: - return self._hash - - def __str__(self) -> str: - return f"{self._interpreter}-{self._abi}-{self._platform}" - - def __repr__(self) -> str: - return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) - - -def parse_tag(tag: str) -> FrozenSet[Tag]: - """ - Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. - - Returning a set is required due to the possibility that the tag is a - compressed tag set. - """ - tags = set() - interpreters, abis, platforms = tag.split("-") - for interpreter in interpreters.split("."): - for abi in abis.split("."): - for platform_ in platforms.split("."): - tags.add(Tag(interpreter, abi, platform_)) - return frozenset(tags) - - -def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: - value = sysconfig.get_config_var(name) - if value is None and warn: - logger.debug( - "Config variable '%s' is unset, Python ABI tag may be incorrect", name - ) - return value - - -def _normalize_string(string: str) -> str: - return string.replace(".", "_").replace("-", "_") - - -def _abi3_applies(python_version: PythonVersion) -> bool: - """ - Determine if the Python version supports abi3. - - PEP 384 was first implemented in Python 3.2. - """ - return len(python_version) > 1 and tuple(python_version) >= (3, 2) - - -def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: - py_version = tuple(py_version) # To allow for version comparison. - abis = [] - version = _version_nodot(py_version[:2]) - debug = pymalloc = ucs4 = "" - with_debug = _get_config_var("Py_DEBUG", warn) - has_refcount = hasattr(sys, "gettotalrefcount") - # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled - # extension modules is the best option. - # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 - has_ext = "_d.pyd" in EXTENSION_SUFFIXES - if with_debug or (with_debug is None and (has_refcount or has_ext)): - debug = "d" - if py_version < (3, 8): - with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) - if with_pymalloc or with_pymalloc is None: - pymalloc = "m" - if py_version < (3, 3): - unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) - if unicode_size == 4 or ( - unicode_size is None and sys.maxunicode == 0x10FFFF - ): - ucs4 = "u" - elif debug: - # Debug builds can also load "normal" extension modules. - # We can also assume no UCS-4 or pymalloc requirement. - abis.append(f"cp{version}") - abis.insert( - 0, - "cp{version}{debug}{pymalloc}{ucs4}".format( - version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 - ), - ) - return abis - - -def cpython_tags( - python_version: Optional[PythonVersion] = None, - abis: Optional[Iterable[str]] = None, - platforms: Optional[Iterable[str]] = None, - *, - warn: bool = False, -) -> Iterator[Tag]: - """ - Yields the tags for a CPython interpreter. 
-
-    The tags consist of:
-    - cp<python_version>-<abi>-<platform>
-    - cp<python_version>-abi3-<platform>
-    - cp<python_version>-none-<platform>
-    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
-
-    If python_version only specifies a major version then user-provided ABIs and
-    the 'none' ABI will be used.
-
-    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
-    their normal position and not at the beginning.
-    """
-    if not python_version:
-        python_version = sys.version_info[:2]
-
-    interpreter = "cp{}".format(_version_nodot(python_version[:2]))
-
-    if abis is None:
-        if len(python_version) > 1:
-            abis = _cpython_abis(python_version, warn)
-        else:
-            abis = []
-    abis = list(abis)
-    # 'abi3' and 'none' are explicitly handled later.
-    for explicit_abi in ("abi3", "none"):
-        try:
-            abis.remove(explicit_abi)
-        except ValueError:
-            pass
-
-    platforms = list(platforms or platform_tags())
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-    if _abi3_applies(python_version):
-        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
-    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
-
-    if _abi3_applies(python_version):
-        for minor_version in range(python_version[1] - 1, 1, -1):
-            for platform_ in platforms:
-                interpreter = "cp{version}".format(
-                    version=_version_nodot((python_version[0], minor_version))
-                )
-                yield Tag(interpreter, "abi3", platform_)
-
-
-def _generic_abi() -> Iterator[str]:
-    abi = sysconfig.get_config_var("SOABI")
-    if abi:
-        yield _normalize_string(abi)
-
-
-def generic_tags(
-    interpreter: Optional[str] = None,
-    abis: Optional[Iterable[str]] = None,
-    platforms: Optional[Iterable[str]] = None,
-    *,
-    warn: bool = False,
-) -> Iterator[Tag]:
-    """
-    Yields the tags for a generic interpreter.
-
-    The tags consist of:
-    - <interpreter>-<abi>-<platform>
-
-    The "none" ABI will be added if it was not explicitly provided.
-    """
-    if not interpreter:
-        interp_name = interpreter_name()
-        interp_version = interpreter_version(warn=warn)
-        interpreter = "".join([interp_name, interp_version])
-    if abis is None:
-        abis = _generic_abi()
-    platforms = list(platforms or platform_tags())
-    abis = list(abis)
-    if "none" not in abis:
-        abis.append("none")
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-
-
-def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
-    """
-    Yields Python versions in descending order.
-
-    After the latest version, the major-only version will be yielded, and then
-    all previous versions of that major version.
-    """
-    if len(py_version) > 1:
-        yield "py{version}".format(version=_version_nodot(py_version[:2]))
-    yield "py{major}".format(major=py_version[0])
-    if len(py_version) > 1:
-        for minor in range(py_version[1] - 1, -1, -1):
-            yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
-
-
-def compatible_tags(
-    python_version: Optional[PythonVersion] = None,
-    interpreter: Optional[str] = None,
-    platforms: Optional[Iterable[str]] = None,
-) -> Iterator[Tag]:
-    """
-    Yields the sequence of tags that are compatible with a specific version of Python.
-
-    The tags consist of:
-    - py*-none-<platform>
-    - <interpreter>-none-any  # ... if `interpreter` is provided.
- - py*-none-any - """ - if not python_version: - python_version = sys.version_info[:2] - platforms = list(platforms or platform_tags()) - for version in _py_interpreter_range(python_version): - for platform_ in platforms: - yield Tag(version, "none", platform_) - if interpreter: - yield Tag(interpreter, "none", "any") - for version in _py_interpreter_range(python_version): - yield Tag(version, "none", "any") - - -def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: - if not is_32bit: - return arch - - if arch.startswith("ppc"): - return "ppc" - - return "i386" - - -def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: - formats = [cpu_arch] - if cpu_arch == "x86_64": - if version < (10, 4): - return [] - formats.extend(["intel", "fat64", "fat32"]) - - elif cpu_arch == "i386": - if version < (10, 4): - return [] - formats.extend(["intel", "fat32", "fat"]) - - elif cpu_arch == "ppc64": - # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? - if version > (10, 5) or version < (10, 4): - return [] - formats.append("fat64") - - elif cpu_arch == "ppc": - if version > (10, 6): - return [] - formats.extend(["fat32", "fat"]) - - if cpu_arch in {"arm64", "x86_64"}: - formats.append("universal2") - - if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: - formats.append("universal") - - return formats - - -def mac_platforms( - version: Optional[MacVersion] = None, arch: Optional[str] = None -) -> Iterator[str]: - """ - Yields the platform tags for a macOS system. - - The `version` parameter is a two-item tuple specifying the macOS version to - generate platform tags for. The `arch` parameter is the CPU architecture to - generate platform tags for. Both parameters default to the appropriate value - for the current system. - """ - version_str, _, cpu_arch = platform.mac_ver() - if version is None: - version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) - else: - version = version - if arch is None: - arch = _mac_arch(cpu_arch) - else: - arch = arch - - if (10, 0) <= version and version < (11, 0): - # Prior to Mac OS 11, each yearly release of Mac OS bumped the - # "minor" version number. The major version was always 10. - for minor_version in range(version[1], -1, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=10, minor=minor_version, binary_format=binary_format - ) - - if version >= (11, 0): - # Starting with Mac OS 11, each yearly release bumps the major version - # number. The minor versions are now the midyear updates. - for major_version in range(version[0], 10, -1): - compat_version = major_version, 0 - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=major_version, minor=0, binary_format=binary_format - ) - - if version >= (11, 0): - # Mac OS 11 on x86_64 is compatible with binaries from previous releases. - # Arm64 support was introduced in 11.0, so no Arm binaries from previous - # releases exist. - # - # However, the "universal2" binary format can have a - # macOS version earlier than 11.0 when the x86_64 part of the binary supports - # that version of macOS. 
- if arch == "x86_64": - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_formats = _mac_binary_formats(compat_version, arch) - for binary_format in binary_formats: - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - else: - for minor_version in range(16, 3, -1): - compat_version = 10, minor_version - binary_format = "universal2" - yield "macosx_{major}_{minor}_{binary_format}".format( - major=compat_version[0], - minor=compat_version[1], - binary_format=binary_format, - ) - - -def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: - linux = _normalize_string(sysconfig.get_platform()) - if is_32bit: - if linux == "linux_x86_64": - linux = "linux_i686" - elif linux == "linux_aarch64": - linux = "linux_armv7l" - _, arch = linux.split("_", 1) - yield from _manylinux.platform_tags(linux, arch) - yield from _musllinux.platform_tags(arch) - yield linux - - -def _generic_platforms() -> Iterator[str]: - yield _normalize_string(sysconfig.get_platform()) - - -def platform_tags() -> Iterator[str]: - """ - Provides the platform tags for this installation. - """ - if platform.system() == "Darwin": - return mac_platforms() - elif platform.system() == "Linux": - return _linux_platforms() - else: - return _generic_platforms() - - -def interpreter_name() -> str: - """ - Returns the name of the running interpreter. - """ - name = sys.implementation.name - return INTERPRETER_SHORT_NAMES.get(name) or name - - -def interpreter_version(*, warn: bool = False) -> str: - """ - Returns the version of the running interpreter. - """ - version = _get_config_var("py_version_nodot", warn=warn) - if version: - version = str(version) - else: - version = _version_nodot(sys.version_info[:2]) - return version - - -def _version_nodot(version: PythonVersion) -> str: - return "".join(map(str, version)) - - -def sys_tags(*, warn: bool = False) -> Iterator[Tag]: - """ - Returns the sequence of tag triples for the running interpreter. - - The order of the sequence corresponds to priority order for the - interpreter, from most to least important. - """ - - interp_name = interpreter_name() - if interp_name == "cp": - yield from cpython_tags(warn=warn) - else: - yield from generic_tags() - - yield from compatible_tags() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/utils.py deleted file mode 100755 index bab11b8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/utils.py +++ /dev/null @@ -1,136 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -from typing import FrozenSet, NewType, Tuple, Union, cast - -from .tags import Tag, parse_tag -from .version import InvalidVersion, Version - -BuildTag = Union[Tuple[()], Tuple[int, str]] -NormalizedName = NewType("NormalizedName", str) - - -class InvalidWheelFilename(ValueError): - """ - An invalid wheel filename was found, users should refer to PEP 427. - """ - - -class InvalidSdistFilename(ValueError): - """ - An invalid sdist filename was found, users should refer to the packaging user guide. 
- """ - - -_canonicalize_regex = re.compile(r"[-_.]+") -# PEP 427: The build number must start with a digit. -_build_tag_regex = re.compile(r"(\d+)(.*)") - - -def canonicalize_name(name: str) -> NormalizedName: - # This is taken from PEP 503. - value = _canonicalize_regex.sub("-", name).lower() - return cast(NormalizedName, value) - - -def canonicalize_version(version: Union[Version, str]) -> str: - """ - This is very similar to Version.__str__, but has one subtle difference - with the way it handles the release segment. - """ - if isinstance(version, str): - try: - parsed = Version(version) - except InvalidVersion: - # Legacy versions cannot be normalized - return version - else: - parsed = version - - parts = [] - - # Epoch - if parsed.epoch != 0: - parts.append(f"{parsed.epoch}!") - - # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) - - # Pre-release - if parsed.pre is not None: - parts.append("".join(str(x) for x in parsed.pre)) - - # Post-release - if parsed.post is not None: - parts.append(f".post{parsed.post}") - - # Development release - if parsed.dev is not None: - parts.append(f".dev{parsed.dev}") - - # Local version segment - if parsed.local is not None: - parts.append(f"+{parsed.local}") - - return "".join(parts) - - -def parse_wheel_filename( - filename: str, -) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: - if not filename.endswith(".whl"): - raise InvalidWheelFilename( - f"Invalid wheel filename (extension must be '.whl'): {filename}" - ) - - filename = filename[:-4] - dashes = filename.count("-") - if dashes not in (4, 5): - raise InvalidWheelFilename( - f"Invalid wheel filename (wrong number of parts): {filename}" - ) - - parts = filename.split("-", dashes - 2) - name_part = parts[0] - # See PEP 427 for the rules on escaping the project name - if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename(f"Invalid project name: {filename}") - name = canonicalize_name(name_part) - version = Version(parts[1]) - if dashes == 5: - build_part = parts[2] - build_match = _build_tag_regex.match(build_part) - if build_match is None: - raise InvalidWheelFilename( - f"Invalid build number: {build_part} in '{filename}'" - ) - build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) - else: - build = () - tags = parse_tag(parts[-1]) - return (name, version, build, tags) - - -def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: - if filename.endswith(".tar.gz"): - file_stem = filename[: -len(".tar.gz")] - elif filename.endswith(".zip"): - file_stem = filename[: -len(".zip")] - else: - raise InvalidSdistFilename( - f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" - f" {filename}" - ) - - # We are requiring a PEP 440 version, which cannot contain dashes, - # so we split on the last dash. 
- name_part, sep, version_part = file_stem.rpartition("-") - if not sep: - raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") - - name = canonicalize_name(name_part) - version = Version(version_part) - return (name, version) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/version.py deleted file mode 100755 index de9a09a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/packaging/version.py +++ /dev/null @@ -1,504 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import collections -import itertools -import re -import warnings -from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union - -from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType - -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] - -InfiniteTypes = Union[InfinityType, NegativeInfinityType] -PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] -SubLocalType = Union[InfiniteTypes, int, str] -LocalType = Union[ - NegativeInfinityType, - Tuple[ - Union[ - SubLocalType, - Tuple[SubLocalType, str], - Tuple[NegativeInfinityType, SubLocalType], - ], - ..., - ], -] -CmpKey = Tuple[ - int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType -] -LegacyCmpKey = Tuple[int, Tuple[str, ...]] -VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool -] - -_Version = collections.namedtuple( - "_Version", ["epoch", "release", "dev", "pre", "post", "local"] -) - - -def parse(version: str) -> Union["LegacyVersion", "Version"]: - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. - """ - - -class _BaseVersion: - _key: Union[CmpKey, LegacyCmpKey] - - def __hash__(self) -> int: - return hash(self._key) - - # Please keep the duplicated `isinstance` check - # in the six comparisons hereunder - # unless you find a way to avoid adding overhead function calls. 
-    def __lt__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key < other._key
-
-    def __le__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key <= other._key
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key == other._key
-
-    def __ge__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key >= other._key
-
-    def __gt__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key > other._key
-
-    def __ne__(self, other: object) -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key != other._key
-
-
-class LegacyVersion(_BaseVersion):
-    def __init__(self, version: str) -> None:
-        self._version = str(version)
-        self._key = _legacy_cmpkey(self._version)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def __str__(self) -> str:
-        return self._version
-
-    def __repr__(self) -> str:
-        return f"<LegacyVersion('{self}')>"
-
-    @property
-    def public(self) -> str:
-        return self._version
-
-    @property
-    def base_version(self) -> str:
-        return self._version
-
-    @property
-    def epoch(self) -> int:
-        return -1
-
-    @property
-    def release(self) -> None:
-        return None
-
-    @property
-    def pre(self) -> None:
-        return None
-
-    @property
-    def post(self) -> None:
-        return None
-
-    @property
-    def dev(self) -> None:
-        return None
-
-    @property
-    def local(self) -> None:
-        return None
-
-    @property
-    def is_prerelease(self) -> bool:
-        return False
-
-    @property
-    def is_postrelease(self) -> bool:
-        return False
-
-    @property
-    def is_devrelease(self) -> bool:
-        return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
-    "pre": "c",
-    "preview": "c",
-    "-": "final-",
-    "rc": "c",
-    "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
-    for part in _legacy_version_component_re.split(s):
-        part = _legacy_version_replacement_map.get(part, part)
-
-        if not part or part == ".":
-            continue
-
-        if part[:1] in "0123456789":
-            # pad for numeric comparison
-            yield part.zfill(8)
-        else:
-            yield "*" + part
-
-    # ensure that alpha/beta/candidate are before final
-    yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
-    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
-    # greater than or equal to 0. This will effectively put the LegacyVersion,
-    # which uses the defacto standard originally implemented by setuptools,
-    # as before all PEP 440 versions.
-    epoch = -1
-
-    # This scheme is taken from pkg_resources.parse_version setuptools prior to
-    # it's adoption of the packaging library.
-    parts: List[str] = []
-    for part in _parse_version_parts(version.lower()):
-        if part.startswith("*"):
-            # remove "-" before a prerelease tag
-            if part < "*final":
-                while parts and parts[-1] == "*final-":
-                    parts.pop()
-
-            # remove trailing zeros from each series of numeric parts
-            while parts and parts[-1] == "00000000":
-                parts.pop()
-
-        parts.append(part)
-
-    return epoch, tuple(parts)
-
-
-# Deliberately not anchored to the start and end of the string, to make it
-# easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
-    v?
-    (?:
-        (?:(?P<epoch>[0-9]+)!)?                           # epoch
-        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
-        (?P<pre>                                          # pre-release
-            [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\.]?
-            (?P<pre_n>[0-9]+)?
-        )?
-        (?P<post>                                         # post release
-            (?:-(?P<post_n1>[0-9]+))
-            |
-            (?:
-                [-_\.]?
-                (?P<post_l>post|rev|r)
-                [-_\.]?
-                (?P<post_n2>[0-9]+)?
-            )
-        )?
-        (?P<dev>                                          # dev release
-            [-_\.]?
-            (?P<dev_l>dev)
-            [-_\.]?
-            (?P<dev_n>[0-9]+)?
-        )?
-    )
-    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
-"""
-
-
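A rough sketch of what the named groups above capture, assuming the pattern is
compiled with the same flags the Version class below uses (VERSION_PATTERN is
importable from packaging.version)::

    import re

    from packaging.version import VERSION_PATTERN

    _re = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    m = _re.match("1.0.post2.dev3+ubuntu.1")
    print(m.group("release"))  # 1.0
    print(m.group("post_n2"))  # 2
    print(m.group("dev_n"))    # 3
    print(m.group("local"))    # ubuntu.1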
-class Version(_BaseVersion):
-
-    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    def __init__(self, version: str) -> None:
-
-        # Validate the version and parse it into pieces
-        match = self._regex.search(version)
-        if not match:
-            raise InvalidVersion(f"Invalid version: '{version}'")
-
-        # Store the parsed out pieces of the version
-        self._version = _Version(
-            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
-            release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
-            post=_parse_letter_version(
-                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
-            ),
-            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
-            local=_parse_local_version(match.group("local")),
-        )
-
-        # Generate a key which will be used for sorting
-        self._key = _cmpkey(
-            self._version.epoch,
-            self._version.release,
-            self._version.pre,
-            self._version.post,
-            self._version.dev,
-            self._version.local,
-        )
-
-    def __repr__(self) -> str:
-        return f"<Version('{self}')>"
-
-    def __str__(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        # Pre-release
-        if self.pre is not None:
-            parts.append("".join(str(x) for x in self.pre))
-
-        # Post-release
-        if self.post is not None:
-            parts.append(f".post{self.post}")
-
-        # Development release
-        if self.dev is not None:
-            parts.append(f".dev{self.dev}")
-
-        # Local version segment
-        if self.local is not None:
-            parts.append(f"+{self.local}")
-
-        return "".join(parts)
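For a sense of the normalization this produces, a quick sketch (assuming
packaging.version is importable)::

    from packaging.version import Version

    print(str(Version("1.0.0-alpha.1")))  # 1.0.0a1   (alternate spelling normalized)
    print(str(Version("1.0-1")))          # 1.0.post1 (implicit post release syntax)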
-
-    @property
-    def epoch(self) -> int:
-        _epoch: int = self._version.epoch
-        return _epoch
-
-    @property
-    def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
-        return _release
-
-    @property
-    def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
-
-    @property
-    def post(self) -> Optional[int]:
-        return self._version.post[1] if self._version.post else None
-
-    @property
-    def dev(self) -> Optional[int]:
-        return self._version.dev[1] if self._version.dev else None
-
-    @property
-    def local(self) -> Optional[str]:
-        if self._version.local:
-            return ".".join(str(x) for x in self._version.local)
-        else:
-            return None
-
-    @property
-    def public(self) -> str:
-        return str(self).split("+", 1)[0]
-
-    @property
-    def base_version(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        return "".join(parts)
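How the different views of a version relate, illustratively (same import
assumption as above)::

    v = Version("1!1.2.3.post1+local.7")
    print(v.public)        # 1!1.2.3.post1  (everything before '+')
    print(v.base_version)  # 1!1.2.3        (epoch and release segment only)
    print(v.local)         # local.7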
-
-    @property
-    def is_prerelease(self) -> bool:
-        return self.dev is not None or self.pre is not None
-
-    @property
-    def is_postrelease(self) -> bool:
-        return self.post is not None
-
-    @property
-    def is_devrelease(self) -> bool:
-        return self.dev is not None
-
-    @property
-    def major(self) -> int:
-        return self.release[0] if len(self.release) >= 1 else 0
-
-    @property
-    def minor(self) -> int:
-        return self.release[1] if len(self.release) >= 2 else 0
-
-    @property
-    def micro(self) -> int:
-        return self.release[2] if len(self.release) >= 3 else 0
-
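Missing release components fall back to 0 rather than raising; for instance::

    v = Version("2.1")
    print(v.major, v.minor, v.micro)  # 2 1 0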
-
-def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
-    if letter:
-        # We consider there to be an implicit 0 in a pre-release if there is
-        # not a numeral associated with it.
-        if number is None:
-            number = 0
-
-        # We normalize any letters to their lower case form
-        letter = letter.lower()
-
-        # We consider some words to be alternate spellings of other words and
-        # in those cases we want to normalize the spellings to our preferred
-        # spelling.
-        if letter == "alpha":
-            letter = "a"
-        elif letter == "beta":
-            letter = "b"
-        elif letter in ["c", "pre", "preview"]:
-            letter = "rc"
-        elif letter in ["rev", "r"]:
-            letter = "post"
-
-        return letter, int(number)
-    if not letter and number:
-        # We assume if we are given a number, but we are not given a letter
-        # then this is using the implicit post release syntax (e.g. 1.0-1)
-        letter = "post"
-
-        return letter, int(number)
-
-    return None
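Both normalization paths, sketched with the helper as defined above::

    print(_parse_letter_version("alpha", None))  # ('a', 0)
    print(_parse_letter_version("", "2"))        # ('post', 2), the implicit post case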
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
-    """
-    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
-    """
-    if local is not None:
-        return tuple(
-            part.lower() if not part.isdigit() else int(part)
-            for part in _local_version_separators.split(local)
-        )
-    return None
-
-
-def _cmpkey(
-    epoch: int,
-    release: Tuple[int, ...],
-    pre: Optional[Tuple[str, int]],
-    post: Optional[Tuple[str, int]],
-    dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll use a reverse the list, drop all the now
-    # leading zeros until we come to something non zero, then take the rest
-    # re-reverse it back into the correct order and make it a tuple and use
-    # that for our sorting key.
-    _release = tuple(
-        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
-    )
-
-    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
-    # We'll do this by abusing the pre segment, but we _only_ want to do this
-    # if there is not a pre or a post segment. If we have one of those then
-    # the normal sorting rules will handle this case correctly.
-    if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
-    # Versions without a pre-release (except as noted above) should sort after
-    # those with one.
-    elif pre is None:
-        _pre = Infinity
-    else:
-        _pre = pre
-
-    # Versions without a post segment should sort before those with one.
-    if post is None:
-        _post: PrePostDevType = NegativeInfinity
-
-    else:
-        _post = post
-
-    # Versions without a development segment should sort after those with one.
-    if dev is None:
-        _dev: PrePostDevType = Infinity
-
-    else:
-        _dev = dev
-
-    if local is None:
-        # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
-    else:
-        # Versions with a local segment need that segment parsed to implement
-        # the sorting rules in PEP440.
-        # - Alpha numeric segments sort before numeric segments
-        # - Alpha numeric segments sort lexicographically
-        # - Numeric segments sort numerically
-        # - Shorter versions sort before longer versions when the prefixes
-        #   match exactly
-        _local = tuple(
-            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
-        )
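-        # e.g. a local segment of ("abc", 1) maps to
-        # ((NegativeInfinity, "abc"), (1, "")), so the alphanumeric part
-        # sorts before the numeric part, as the rules above require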
-
-    return epoch, _release, _pre, _post, _dev, _local
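-
-
-# Taken together, the key above yields the PEP 440 ordering, e.g.:
-#
-#   1.0.dev0 < 1.0a0 < 1.0rc1 < 1.0 < 1.0.post1 < 1.0.post1+local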
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/pyparsing.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/pyparsing.py
deleted file mode 100755
index cf75e1e..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/_vendor/pyparsing.py
+++ /dev/null
@@ -1,5742 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2018  Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = \
-"""
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and executing simple grammars,
-vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
-don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
-provides a library of classes that you use to construct the grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form 
-C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements 
-(L{'+'} operator gives L{And} expressions, strings are auto-converted to
-L{Literal} expressions)::
-
-    from pyparsing import Word, alphas
-
-    # define grammar of a greeting
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-
-    hello = "Hello, World!"
-    print (hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the self-explanatory
-class names, and the use of '+', '|' and '^' operators.
-
-The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
-object with named attributes.
-
-The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
- - quoted strings
- - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
- - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
- - construct character word-group expressions using the L{Word} class
- - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
- - use L{'+'}, L{'|'}, L{'^'}, and L{'&'} operators to combine simple expressions into more complex ones
- - associate names with your parsed results using L{ParserElement.setResultsName}
- - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
- - find more useful common expressions in the L{pyparsing_common} namespace class
-"""
-
-__version__ = "2.2.1"
-__versionTime__ = "18 Sep 2018 00:49 UTC"
-__author__ = "Paul McGuire <ptmcguire@austin.rr.com>"
-
-import string
-from weakref import ref as wkref
-import copy
-import sys
-import warnings
-import re
-import sre_constants
-import collections
-import pprint
-import traceback
-import types
-from datetime import datetime
-
-try:
-    from _thread import RLock
-except ImportError:
-    from threading import RLock
-
-try:
-    # Python 3
-    from collections.abc import Iterable
-    from collections.abc import MutableMapping
-except ImportError:
-    # Python 2.7
-    from collections import Iterable
-    from collections import MutableMapping
-
-try:
-    from collections import OrderedDict as _OrderedDict
-except ImportError:
-    try:
-        from ordereddict import OrderedDict as _OrderedDict
-    except ImportError:
-        _OrderedDict = None
-
-#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
-
-__all__ = [
-'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
-'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
-'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
-'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
-'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
-'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
-'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
-'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
-'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
-'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
-'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
-'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
-'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
-'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
-'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
-'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
-'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
-'CloseMatch', 'tokenMap', 'pyparsing_common',
-]
-
-system_version = tuple(sys.version_info)[:3]
-PY_3 = system_version[0] == 3
-if PY_3:
-    _MAX_INT = sys.maxsize
-    basestring = str
-    unichr = chr
-    _ustr = str
-
-    # build list of single arg builtins, that can be used as parse actions
-    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
-
-else:
-    _MAX_INT = sys.maxint
-    range = xrange
-
-    def _ustr(obj):
-        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
-           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
-           then < returns the unicode object | encodes it with the default encoding | ... >.
-        """
-        if isinstance(obj,unicode):
-            return obj
-
-        try:
-            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
-            # it won't break any existing code.
-            return str(obj)
-
-        except UnicodeEncodeError:
-            # Else encode it
-            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
-            xmlcharref = Regex(r'&#\d+;')
-            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
-            return xmlcharref.transformString(ret)
-
-    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
-    singleArgBuiltins = []
-    import __builtin__
-    for fname in "sum len sorted reversed list tuple set any all min max".split():
-        try:
-            singleArgBuiltins.append(getattr(__builtin__,fname))
-        except AttributeError:
-            continue
-            
-_generatorType = type((y for y in range(1)))
- 
-def _xml_escape(data):
-    """Escape &, <, >, ", ', etc. in a string of data."""
-
-    # ampersand must be replaced first
-    from_symbols = '&><"\''
-    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
-    for from_,to_ in zip(from_symbols, to_symbols):
-        data = data.replace(from_, to_)
-    return data
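-
-# a small worked example of the escaping above:
-#   _xml_escape('a < b & c')  ->  'a &lt; b &amp; c'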
-
-class _Constants(object):
-    pass
-
-alphas     = string.ascii_uppercase + string.ascii_lowercase
-nums       = "0123456789"
-hexnums    = nums + "ABCDEFabcdef"
-alphanums  = alphas + nums
-_bslash    = chr(92)
-printables = "".join(c for c in string.printable if c not in string.whitespace)
-
-class ParseBaseException(Exception):
-    """base exception class for all parsing runtime exceptions"""
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__( self, pstr, loc=0, msg=None, elem=None ):
-        self.loc = loc
-        if msg is None:
-            self.msg = pstr
-            self.pstr = ""
-        else:
-            self.msg = msg
-            self.pstr = pstr
-        self.parserElement = elem
-        self.args = (pstr, loc, msg)
-
-    @classmethod
-    def _from_exception(cls, pe):
-        """
-        internal factory method to simplify creating one type of ParseException 
-        from another - avoids having __init__ signature conflicts among subclasses
-        """
-        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
-    def __getattr__( self, aname ):
-        """supported attributes by name are:
-            - lineno - returns the line number of the exception text
-            - col - returns the column number of the exception text
-            - line - returns the line containing the exception text
-        """
-        if( aname == "lineno" ):
-            return lineno( self.loc, self.pstr )
-        elif( aname in ("col", "column") ):
-            return col( self.loc, self.pstr )
-        elif( aname == "line" ):
-            return line( self.loc, self.pstr )
-        else:
-            raise AttributeError(aname)
-
-    def __str__( self ):
-        return "%s (at char %d), (line:%d, col:%d)" % \
-                ( self.msg, self.loc, self.lineno, self.column )
-    def __repr__( self ):
-        return _ustr(self)
-    def markInputline( self, markerString = ">!<" ):
-        """Extracts the exception line from the input string, and marks
-           the location of the exception with a special symbol.
-        """
-        line_str = self.line
-        line_column = self.column - 1
-        if markerString:
-            line_str = "".join((line_str[:line_column],
-                                markerString, line_str[line_column:]))
-        return line_str.strip()
-    def __dir__(self):
-        return "lineno col line".split() + dir(type(self))
-
-class ParseException(ParseBaseException):
-    """
-    Exception thrown when a parse expression doesn't match the input string;
-    supported attributes by name are:
-     - lineno - returns the line number of the exception text
-     - col - returns the column number of the exception text
-     - line - returns the line containing the exception text
-        
-    Example::
-        try:
-            Word(nums).setName("integer").parseString("ABC")
-        except ParseException as pe:
-            print(pe)
-            print("column: {}".format(pe.col))
-            
-    prints::
-       Expected integer (at char 0), (line:1, col:1)
-        column: 1
-    """
-    pass
-
-class ParseFatalException(ParseBaseException):
-    """user-throwable exception thrown when inconsistent parse content
-       is found; stops all parsing immediately"""
-    pass
-
-class ParseSyntaxException(ParseFatalException):
-    """just like L{ParseFatalException}, but thrown internally when an
-       L{ErrorStop} ('-' operator) indicates that parsing is to stop 
-       immediately because an unbacktrackable syntax error has been found"""
-    pass
-
-#~ class ReparseException(ParseBaseException):
-    #~ """Experimental class - parse actions can raise this exception to cause
-       #~ pyparsing to reparse the input string:
-        #~ - with a modified input string, and/or
-        #~ - with a modified start location
-       #~ Set the values of the ReparseException in the constructor, and raise the
-       #~ exception in a parse action to cause pyparsing to use the new string/location.
-       #~ Setting the values as None causes no change to be made.
-       #~ """
-    #~ def __init_( self, newstring, restartLoc ):
-        #~ self.newParseText = newstring
-        #~ self.reparseLoc = restartLoc
-
-class RecursiveGrammarException(Exception):
-    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
-    def __init__( self, parseElementList ):
-        self.parseElementTrace = parseElementList
-
-    def __str__( self ):
-        return "RecursiveGrammarException: %s" % self.parseElementTrace
-
-class _ParseResultsWithOffset(object):
-    def __init__(self,p1,p2):
-        self.tup = (p1,p2)
-    def __getitem__(self,i):
-        return self.tup[i]
-    def __repr__(self):
-        return repr(self.tup[0])
-    def setOffset(self,i):
-        self.tup = (self.tup[0],i)
-
-class ParseResults(object):
-    """
-    Structured parse results, to provide multiple means of access to the parsed data:
-       - as a list (C{len(results)})
-       - by list index (C{results[0], results[1]}, etc.)
-       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
-
-    Example::
-        integer = Word(nums)
-        date_str = (integer.setResultsName("year") + '/' 
-                        + integer.setResultsName("month") + '/' 
-                        + integer.setResultsName("day"))
-        # equivalent form:
-        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-        # parseString returns a ParseResults object
-        result = date_str.parseString("1999/12/31")
-
-        def test(s, fn=repr):
-            print("%s -> %s" % (s, fn(eval(s))))
-        test("list(result)")
-        test("result[0]")
-        test("result['month']")
-        test("result.day")
-        test("'month' in result")
-        test("'minutes' in result")
-        test("result.dump()", str)
-    prints::
-        list(result) -> ['1999', '/', '12', '/', '31']
-        result[0] -> '1999'
-        result['month'] -> '12'
-        result.day -> '31'
-        'month' in result -> True
-        'minutes' in result -> False
-        result.dump() -> ['1999', '/', '12', '/', '31']
-        - day: 31
-        - month: 12
-        - year: 1999
-    """
-    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
-        if isinstance(toklist, cls):
-            return toklist
-        retobj = object.__new__(cls)
-        retobj.__doinit = True
-        return retobj
-
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
-        if self.__doinit:
-            self.__doinit = False
-            self.__name = None
-            self.__parent = None
-            self.__accumNames = {}
-            self.__asList = asList
-            self.__modal = modal
-            if toklist is None:
-                toklist = []
-            if isinstance(toklist, list):
-                self.__toklist = toklist[:]
-            elif isinstance(toklist, _generatorType):
-                self.__toklist = list(toklist)
-            else:
-                self.__toklist = [toklist]
-            self.__tokdict = dict()
-
-        if name is not None and name:
-            if not modal:
-                self.__accumNames[name] = 0
-            if isinstance(name,int):
-                name = _ustr(name) # will always return a str, but use _ustr for consistency
-            self.__name = name
-            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
-                if isinstance(toklist,basestring):
-                    toklist = [ toklist ]
-                if asList:
-                    if isinstance(toklist,ParseResults):
-                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
-                    else:
-                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
-                    self[name].__name = name
-                else:
-                    try:
-                        self[name] = toklist[0]
-                    except (KeyError,TypeError,IndexError):
-                        self[name] = toklist
-
-    def __getitem__( self, i ):
-        if isinstance( i, (int,slice) ):
-            return self.__toklist[i]
-        else:
-            if i not in self.__accumNames:
-                return self.__tokdict[i][-1][0]
-            else:
-                return ParseResults([ v[0] for v in self.__tokdict[i] ])
-
-    def __setitem__( self, k, v, isinstance=isinstance ):
-        if isinstance(v,_ParseResultsWithOffset):
-            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
-            sub = v[0]
-        elif isinstance(k,(int,slice)):
-            self.__toklist[k] = v
-            sub = v
-        else:
-            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
-            sub = v
-        if isinstance(sub,ParseResults):
-            sub.__parent = wkref(self)
-
-    def __delitem__( self, i ):
-        if isinstance(i,(int,slice)):
-            mylen = len( self.__toklist )
-            del self.__toklist[i]
-
-            # convert int to slice
-            if isinstance(i, int):
-                if i < 0:
-                    i += mylen
-                i = slice(i, i+1)
-            # get removed indices
-            removed = list(range(*i.indices(mylen)))
-            removed.reverse()
-            # fixup indices in token dictionary
-            for name,occurrences in self.__tokdict.items():
-                for j in removed:
-                    for k, (value, position) in enumerate(occurrences):
-                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
-        else:
-            del self.__tokdict[i]
-
-    def __contains__( self, k ):
-        return k in self.__tokdict
-
-    def __len__( self ): return len( self.__toklist )
-    def __bool__(self): return ( not not self.__toklist )
-    __nonzero__ = __bool__
-    def __iter__( self ): return iter( self.__toklist )
-    def __reversed__( self ): return iter( self.__toklist[::-1] )
-    def _iterkeys( self ):
-        if hasattr(self.__tokdict, "iterkeys"):
-            return self.__tokdict.iterkeys()
-        else:
-            return iter(self.__tokdict)
-
-    def _itervalues( self ):
-        return (self[k] for k in self._iterkeys())
-            
-    def _iteritems( self ):
-        return ((k, self[k]) for k in self._iterkeys())
-
-    if PY_3:
-        keys = _iterkeys       
-        """Returns an iterator of all named result keys (Python 3.x only)."""
-
-        values = _itervalues
-        """Returns an iterator of all named result values (Python 3.x only)."""
-
-        items = _iteritems
-        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
-
-    else:
-        iterkeys = _iterkeys
-        """Returns an iterator of all named result keys (Python 2.x only)."""
-
-        itervalues = _itervalues
-        """Returns an iterator of all named result values (Python 2.x only)."""
-
-        iteritems = _iteritems
-        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
-
-        def keys( self ):
-            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iterkeys())
-
-        def values( self ):
-            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.itervalues())
-                
-        def items( self ):
-            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
-            return list(self.iteritems())
-
-    def haskeys( self ):
-        """Since keys() returns an iterator, this method is helpful in bypassing
-           code that looks for the existence of any defined results names."""
-        return bool(self.__tokdict)
-        
-    def pop( self, *args, **kwargs):
-        """
-        Removes and returns item at specified index (default=C{last}).
-        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
-        argument or an integer argument, it will use C{list} semantics
-        and pop tokens from the list of parsed tokens. If passed a 
-        non-integer argument (most likely a string), it will use C{dict}
-        semantics and pop the corresponding value from any defined 
-        results names. A second default return value argument is 
-        supported, just as in C{dict.pop()}.
-
-        Example::
-            def remove_first(tokens):
-                tokens.pop(0)
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
-
-            label = Word(alphas)
-            patt = label("LABEL") + OneOrMore(Word(nums))
-            print(patt.parseString("AAB 123 321").dump())
-
-            # Use pop() in a parse action to remove named result (note that corresponding value is not
-            # removed from list form of results)
-            def remove_LABEL(tokens):
-                tokens.pop("LABEL")
-                return tokens
-            patt.addParseAction(remove_LABEL)
-            print(patt.parseString("AAB 123 321").dump())
-        prints::
-            ['AAB', '123', '321']
-            - LABEL: AAB
-
-            ['AAB', '123', '321']
-        """
-        if not args:
-            args = [-1]
-        for k,v in kwargs.items():
-            if k == 'default':
-                args = (args[0], v)
-            else:
-                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
-        if (isinstance(args[0], int) or 
-                        len(args) == 1 or 
-                        args[0] in self):
-            index = args[0]
-            ret = self[index]
-            del self[index]
-            return ret
-        else:
-            defaultvalue = args[1]
-            return defaultvalue
-
-    def get(self, key, defaultValue=None):
-        """
-        Returns named result matching the given key, or if there is no
-        such name, then returns the given C{defaultValue} or C{None} if no
-        C{defaultValue} is specified.
-
-        Similar to C{dict.get()}.
-        
-        Example::
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
-
-            result = date_str.parseString("1999/12/31")
-            print(result.get("year")) # -> '1999'
-            print(result.get("hour", "not specified")) # -> 'not specified'
-            print(result.get("hour")) # -> None
-        """
-        if key in self:
-            return self[key]
-        else:
-            return defaultValue
-
-    def insert( self, index, insStr ):
-        """
-        Inserts new element at location index in the list of parsed tokens.
-        
-        Similar to C{list.insert()}.
-
-        Example::
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to insert the parse location in the front of the parsed results
-            def insert_locn(locn, tokens):
-                tokens.insert(0, locn)
-            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
-        """
-        self.__toklist.insert(index, insStr)
-        # fixup indices in token dictionary
-        for name,occurrences in self.__tokdict.items():
-            for k, (value, position) in enumerate(occurrences):
-                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
-
-    def append( self, item ):
-        """
-        Add single element to end of ParseResults list of elements.
-
-        Example::
-            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-            
-            # use a parse action to compute the sum of the parsed integers, and add it to the end
-            def append_sum(tokens):
-                tokens.append(sum(map(int, tokens)))
-            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
-        """
-        self.__toklist.append(item)
-
-    def extend( self, itemseq ):
-        """
-        Add sequence of elements to end of ParseResults list of elements.
-
-        Example::
-            patt = OneOrMore(Word(alphas))
-            
-            # use a parse action to append the reverse of the matched strings, to make a palindrome
-            def make_palindrome(tokens):
-                tokens.extend(reversed([t[::-1] for t in tokens]))
-                return ''.join(tokens)
-            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
-        """
-        if isinstance(itemseq, ParseResults):
-            self += itemseq
-        else:
-            self.__toklist.extend(itemseq)
-
-    def clear( self ):
-        """
-        Clear all elements and results names.
-        """
-        del self.__toklist[:]
-        self.__tokdict.clear()
-
-    def __getattr__( self, name ):
-        try:
-            return self[name]
-        except KeyError:
-            return ""
-            
-        if name in self.__tokdict:
-            if name not in self.__accumNames:
-                return self.__tokdict[name][-1][0]
-            else:
-                return ParseResults([ v[0] for v in self.__tokdict[name] ])
-        else:
-            return ""
-
-    def __add__( self, other ):
-        ret = self.copy()
-        ret += other
-        return ret
-
-    def __iadd__( self, other ):
-        if other.__tokdict:
-            offset = len(self.__toklist)
-            addoffset = lambda a: offset if a<0 else a+offset
-            otheritems = other.__tokdict.items()
-            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
-                                for (k,vlist) in otheritems for v in vlist]
-            for k,v in otherdictitems:
-                self[k] = v
-                if isinstance(v[0],ParseResults):
-                    v[0].__parent = wkref(self)
-            
-        self.__toklist += other.__toklist
-        self.__accumNames.update( other.__accumNames )
-        return self
-
-    def __radd__(self, other):
-        if isinstance(other,int) and other == 0:
-            # useful for merging many ParseResults using sum() builtin
-            return self.copy()
-        else:
-            # this may raise a TypeError - so be it
-            return other + self
-        
-    def __repr__( self ):
-        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
-
-    def __str__( self ):
-        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
-
-    def _asStringList( self, sep='' ):
-        out = []
-        for item in self.__toklist:
-            if out and sep:
-                out.append(sep)
-            if isinstance( item, ParseResults ):
-                out += item._asStringList()
-            else:
-                out.append( _ustr(item) )
-        return out
-
-    def asList( self ):
-        """
-        Returns the parse results as a nested list of matching tokens, all converted to strings.
-
-        Example::
-            patt = OneOrMore(Word(alphas))
-            result = patt.parseString("sldkj lsdkj sldkj")
-            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
-            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-            
-            # Use asList() to create an actual list
-            result_list = result.asList()
-            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
-        """
-        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
-
-    def asDict( self ):
-        """
-        Returns the named parse results as a nested dictionary.
-
-        Example::
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-            
-            result = date_str.parseString('12/31/1999')
-            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-            
-            result_dict = result.asDict()
-            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
-            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
-            import json
-            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
-            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
-        """
-        if PY_3:
-            item_fn = self.items
-        else:
-            item_fn = self.iteritems
-            
-        def toItem(obj):
-            if isinstance(obj, ParseResults):
-                if obj.haskeys():
-                    return obj.asDict()
-                else:
-                    return [toItem(v) for v in obj]
-            else:
-                return obj
-                
-        return dict((k,toItem(v)) for k,v in item_fn())
-
-    def copy( self ):
-        """
-        Returns a new copy of a C{ParseResults} object.
-        """
-        ret = ParseResults( self.__toklist )
-        ret.__tokdict = self.__tokdict.copy()
-        ret.__parent = self.__parent
-        ret.__accumNames.update( self.__accumNames )
-        ret.__name = self.__name
-        return ret
-
-    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
-        """
-        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
-        """
-        nl = "\n"
-        out = []
-        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
-                                                            for v in vlist)
-        nextLevelIndent = indent + "  "
-
-        # collapse out indents if formatting is not desired
-        if not formatted:
-            indent = ""
-            nextLevelIndent = ""
-            nl = ""
-
-        selfTag = None
-        if doctag is not None:
-            selfTag = doctag
-        else:
-            if self.__name:
-                selfTag = self.__name
-
-        if not selfTag:
-            if namedItemsOnly:
-                return ""
-            else:
-                selfTag = "ITEM"
-
-        out += [ nl, indent, "<", selfTag, ">" ]
-
-        for i,res in enumerate(self.__toklist):
-            if isinstance(res,ParseResults):
-                if i in namedItems:
-                    out += [ res.asXML(namedItems[i],
-                                        namedItemsOnly and doctag is None,
-                                        nextLevelIndent,
-                                        formatted)]
-                else:
-                    out += [ res.asXML(None,
-                                        namedItemsOnly and doctag is None,
-                                        nextLevelIndent,
-                                        formatted)]
-            else:
-                # individual token, see if there is a name for it
-                resTag = None
-                if i in namedItems:
-                    resTag = namedItems[i]
-                if not resTag:
-                    if namedItemsOnly:
-                        continue
-                    else:
-                        resTag = "ITEM"
-                xmlBodyText = _xml_escape(_ustr(res))
-                out += [ nl, nextLevelIndent, "<", resTag, ">",
-                                                xmlBodyText,
-                                                "</", resTag, ">" ]
-
-        out += [ nl, indent, "</", selfTag, ">" ]
-        return "".join(out)
-
-    def __lookup(self,sub):
-        for k,vlist in self.__tokdict.items():
-            for v,loc in vlist:
-                if sub is v:
-                    return k
-        return None
-
-    def getName(self):
-        r"""
-        Returns the results name for this token expression. Useful when several 
-        different expressions might match at a particular location.
-
-        Example::
-            integer = Word(nums)
-            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
-            house_number_expr = Suppress('#') + Word(nums, alphanums)
-            user_data = (Group(house_number_expr)("house_number") 
-                        | Group(ssn_expr)("ssn")
-                        | Group(integer)("age"))
-            user_info = OneOrMore(user_data)
-            
-            result = user_info.parseString("22 111-22-3333 #221B")
-            for item in result:
-                print(item.getName(), ':', item[0])
-        prints::
-            age : 22
-            ssn : 111-22-3333
-            house_number : 221B
-        """
-        if self.__name:
-            return self.__name
-        elif self.__parent:
-            par = self.__parent()
-            if par:
-                return par.__lookup(self)
-            else:
-                return None
-        elif (len(self) == 1 and
-               len(self.__tokdict) == 1 and
-               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
-            return next(iter(self.__tokdict.keys()))
-        else:
-            return None
-
-    def dump(self, indent='', depth=0, full=True):
-        """
-        Diagnostic method for listing out the contents of a C{ParseResults}.
-        Accepts an optional C{indent} argument so that this string can be embedded
-        in a nested display of other data.
-
-        Example::
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-            
-            result = date_str.parseString('12/31/1999')
-            print(result.dump())
-        prints::
-            ['12', '/', '31', '/', '1999']
-            - day: 1999
-            - month: 31
-            - year: 12
-        """
-        out = []
-        NL = '\n'
-        out.append( indent+_ustr(self.asList()) )
-        if full:
-            if self.haskeys():
-                items = sorted((str(k), v) for k,v in self.items())
-                for k,v in items:
-                    if out:
-                        out.append(NL)
-                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
-                    if isinstance(v,ParseResults):
-                        if v:
-                            out.append( v.dump(indent,depth+1) )
-                        else:
-                            out.append(_ustr(v))
-                    else:
-                        out.append(repr(v))
-            elif any(isinstance(vv,ParseResults) for vv in self):
-                v = self
-                for i,vv in enumerate(v):
-                    if isinstance(vv,ParseResults):
-                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
-                    else:
-                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
-            
-        return "".join(out)
-
-    def pprint(self, *args, **kwargs):
-        """
-        Pretty-printer for parsed results as a list, using the C{pprint} module.
-        Accepts additional positional or keyword args as defined for the 
-        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
-
-        Example::
-            ident = Word(alphas, alphanums)
-            num = Word(nums)
-            func = Forward()
-            term = ident | num | Group('(' + func + ')')
-            func <<= ident + Group(Optional(delimitedList(term)))
-            result = func.parseString("fna a,b,(fnb c,d,200),100")
-            result.pprint(width=40)
-        prints::
-            ['fna',
-             ['a',
-              'b',
-              ['(', 'fnb', ['c', 'd', '200'], ')'],
-              '100']]
-        """
-        pprint.pprint(self.asList(), *args, **kwargs)
-
-    # add support for pickle protocol
-    def __getstate__(self):
-        return ( self.__toklist,
-                 ( self.__tokdict.copy(),
-                   self.__parent is not None and self.__parent() or None,
-                   self.__accumNames,
-                   self.__name ) )
-
-    def __setstate__(self,state):
-        self.__toklist = state[0]
-        (self.__tokdict,
-         par,
-         inAccumNames,
-         self.__name) = state[1]
-        self.__accumNames = {}
-        self.__accumNames.update(inAccumNames)
-        if par is not None:
-            self.__parent = wkref(par)
-        else:
-            self.__parent = None
-
-    def __getnewargs__(self):
-        return self.__toklist, self.__name, self.__asList, self.__modal
-
-    def __dir__(self):
-        return (dir(type(self)) + list(self.keys()))
-
-MutableMapping.register(ParseResults)
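-# registering (rather than subclassing) lets isinstance(results, MutableMapping)
-# checks pass while keeping the hand-rolled list/dict hybrid implementation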
-
-def col (loc,strg):
-    """Returns current column within a string, counting newlines as line separators.
-   The first column is number 1.
-
-   Note: the default parsing behavior is to expand tabs in the input string
-   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
-   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
-   consistent view of the parsed string, the parse location, and line and column
-   positions within the parsed string.
-   """
-    s = strg
-    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
-
-def lineno(loc,strg):
-    """Returns current line number within a string, counting newlines as line separators.
-   The first line is number 1.
-
-   Note: the default parsing behavior is to expand tabs in the input string
-   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
-   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
-   consistent view of the parsed string, the parse location, and line and column
-   positions within the parsed string.
-   """
-    return strg.count("\n",0,loc) + 1
-
-def line( loc, strg ):
-    """Returns the line of text containing loc within a string, counting newlines as line separators.
-       """
-    lastCR = strg.rfind("\n", 0, loc)
-    nextCR = strg.find("\n", loc)
-    if nextCR >= 0:
-        return strg[lastCR+1:nextCR]
-    else:
-        return strg[lastCR+1:]
-
-def _defaultStartDebugAction( instring, loc, expr ):
-    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
-
-def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
-    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
-
-def _defaultExceptionDebugAction( instring, loc, expr, exc ):
-    print ("Exception raised:" + _ustr(exc))
-
-def nullDebugAction(*args):
-    """'Do-nothing' debug action, to suppress debugging output during parsing."""
-    pass
-
-# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
-#~ 'decorator to trim function calls to match the arity of the target'
-#~ def _trim_arity(func, maxargs=3):
-    #~ if func in singleArgBuiltins:
-        #~ return lambda s,l,t: func(t)
-    #~ limit = 0
-    #~ foundArity = False
-    #~ def wrapper(*args):
-        #~ nonlocal limit,foundArity
-        #~ while 1:
-            #~ try:
-                #~ ret = func(*args[limit:])
-                #~ foundArity = True
-                #~ return ret
-            #~ except TypeError:
-                #~ if limit == maxargs or foundArity:
-                    #~ raise
-                #~ limit += 1
-                #~ continue
-    #~ return wrapper
-
-# this version is Python 2.x-3.x cross-compatible
-'decorator to trim function calls to match the arity of the target'
-def _trim_arity(func, maxargs=2):
-    if func in singleArgBuiltins:
-        return lambda s,l,t: func(t)
-    limit = [0]
-    foundArity = [False]
-    
-    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
-    if system_version[:2] >= (3,5):
-        def extract_stack(limit=0):
-            # special handling for Python 3.5.0 - extra deep call stack by 1
-            offset = -3 if system_version == (3,5,0) else -2
-            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
-            return [frame_summary[:2]]
-        def extract_tb(tb, limit=0):
-            frames = traceback.extract_tb(tb, limit=limit)
-            frame_summary = frames[-1]
-            return [frame_summary[:2]]
-    else:
-        extract_stack = traceback.extract_stack
-        extract_tb = traceback.extract_tb
-    
-    # synthesize what would be returned by traceback.extract_stack at the call to 
-    # user's parse action 'func', so that we don't incur call penalty at parse time
-    
-    LINE_DIFF = 6
-    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
-    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
-    this_line = extract_stack(limit=2)[-1]
-    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
-
-    def wrapper(*args):
-        while 1:
-            try:
-                ret = func(*args[limit[0]:])
-                foundArity[0] = True
-                return ret
-            except TypeError:
-                # re-raise TypeErrors if they did not come from our arity testing
-                if foundArity[0]:
-                    raise
-                else:
-                    try:
-                        tb = sys.exc_info()[-1]
-                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
-                            raise
-                    finally:
-                        del tb
-
-                if limit[0] <= maxargs:
-                    limit[0] += 1
-                    continue
-                raise
-
-    # copy func name to wrapper for sensible debug output
-    func_name = ""
-    try:
-        func_name = getattr(func, '__name__', 
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    wrapper.__name__ = func_name
-
-    return wrapper
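-
-# the arity trimming above is what lets parse actions be written with any of
-# the supported signatures interchangeably, e.g. (sketch):
-#
-#   integer = Word(nums)
-#   integer.setParseAction(lambda t: int(t[0]))         # fn(toks)
-#   integer.setParseAction(lambda l, t: int(t[0]))      # fn(loc, toks)
-#   integer.setParseAction(lambda s, l, t: int(t[0]))   # fn(s, loc, toks)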
-
-class ParserElement(object):
-    """Abstract base level parser element class."""
-    DEFAULT_WHITE_CHARS = " \n\t\r"
-    verbose_stacktrace = False
-
-    @staticmethod
-    def setDefaultWhitespaceChars( chars ):
-        r"""
-        Overrides the default whitespace chars
-
-        Example::
-            # default whitespace chars are space, <TAB> and newline
-            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
-            
-            # change to just treat newline as significant
-            ParserElement.setDefaultWhitespaceChars(" \t")
-            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
-        """
-        ParserElement.DEFAULT_WHITE_CHARS = chars
-
-    @staticmethod
-    def inlineLiteralsUsing(cls):
-        """
-        Set class to be used for inclusion of string literals into a parser.
-        
-        Example::
-            # default literal class used is Literal
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-
-            # change to Suppress
-            ParserElement.inlineLiteralsUsing(Suppress)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
-        """
-        ParserElement._literalStringClass = cls
-
-    def __init__( self, savelist=False ):
-        self.parseAction = list()
-        self.failAction = None
-        #~ self.name = ""  # don't define self.name, let subclasses try/except upcall
-        self.strRepr = None
-        self.resultsName = None
-        self.saveAsList = savelist
-        self.skipWhitespace = True
-        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
-        self.copyDefaultWhiteChars = True
-        self.mayReturnEmpty = False # used when checking for left-recursion
-        self.keepTabs = False
-        self.ignoreExprs = list()
-        self.debug = False
-        self.streamlined = False
-        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
-        self.errmsg = ""
-        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
-        self.debugActions = ( None, None, None ) #custom debug actions
-        self.re = None
-        self.callPreparse = True # used to avoid redundant calls to preParse
-        self.callDuringTry = False
-
-    def copy( self ):
-        """
-        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
-        for the same parsing pattern, using copies of the original parse element.
-        
-        Example::
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
-            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
-            
-            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
-        prints::
-            [5120, 100, 655360, 268435456]
-        Equivalent form of C{expr.copy()} is just C{expr()}::
-            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
-        """
-        cpy = copy.copy( self )
-        cpy.parseAction = self.parseAction[:]
-        cpy.ignoreExprs = self.ignoreExprs[:]
-        if self.copyDefaultWhiteChars:
-            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
-        return cpy
-
-    def setName( self, name ):
-        """
-        Define name for this expression, makes debugging and exception messages clearer.
-        
-        Example::
-            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
-            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
-        """
-        self.name = name
-        self.errmsg = "Expected " + self.name
-        if hasattr(self,"exception"):
-            self.exception.msg = self.errmsg
-        return self
-
-    def setResultsName( self, name, listAllMatches=False ):
-        """
-        Define name for referencing matching tokens as a nested attribute
-        of the returned parse results.
-        NOTE: this returns a *copy* of the original C{ParserElement} object;
-        this is so that the client can define a basic element, such as an
-        integer, and reference it in multiple places with different names.
-
-        You can also set results names using the abbreviated syntax,
-        C{expr("name")} in place of C{expr.setResultsName("name")} - 
-        see L{I{__call__}<__call__>}.
-
-        Example::
-            date_str = (integer.setResultsName("year") + '/' 
-                        + integer.setResultsName("month") + '/' 
-                        + integer.setResultsName("day"))
-
-            # equivalent form:
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-        """
-        newself = self.copy()
-        if name.endswith("*"):
-            name = name[:-1]
-            listAllMatches=True
-        newself.resultsName = name
-        newself.modalResults = not listAllMatches
-        return newself
-
-    def setBreak(self,breakFlag = True):
-        """Method to invoke the Python pdb debugger when this element is
-           about to be parsed. Set C{breakFlag} to True to enable, False to
-           disable.
-        """
-        if breakFlag:
-            _parseMethod = self._parse
-            def breaker(instring, loc, doActions=True, callPreParse=True):
-                import pdb
-                pdb.set_trace()
-                return _parseMethod( instring, loc, doActions, callPreParse )
-            breaker._originalParseMethod = _parseMethod
-            self._parse = breaker
-        else:
-            if hasattr(self._parse,"_originalParseMethod"):
-                self._parse = self._parse._originalParseMethod
-        return self
-
-    def setParseAction( self, *fns, **kwargs ):
-        """
-        Define one or more actions to perform when successfully matching parse element definition.
-        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
-        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
-         - s   = the original string being parsed (see note below)
-         - loc = the location of the matching substring
-         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
-        If the functions in fns modify the tokens, they can return them as the return
-        value from fn, and the modified list of tokens will replace the original.
-        Otherwise, fn does not need to return any value.
-
-        Optional keyword arguments:
-         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
-
-        Note: the default parsing behavior is to expand tabs in the input string
-        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
-        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
-        consistent view of the parsed string, the parse location, and line and column
-        positions within the parsed string.
-        
-        Example::
-            integer = Word(nums)
-            date_str = integer + '/' + integer + '/' + integer
-
-            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-            # use parse action to convert to ints at parse time
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            date_str = integer + '/' + integer + '/' + integer
-
-            # note that integer fields are now ints, not strings
-            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
-        """
-        self.parseAction = list(map(_trim_arity, list(fns)))
-        self.callDuringTry = kwargs.get("callDuringTry", False)
-        return self
-
-    def addParseAction( self, *fns, **kwargs ):
-        """
-        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
-        
-        See examples in L{I{copy}<copy>}.
-        """
-        self.parseAction += list(map(_trim_arity, list(fns)))
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def addCondition(self, *fns, **kwargs):
-        """Add a boolean predicate function to expression's list of parse actions. See 
-        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
-        functions passed to C{addCondition} need to return boolean success/fail of the condition.
-
-        Optional keyword arguments:
-         - message = define a custom message to be used in the raised exception
-         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
-         
-        Example::
-            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
-            year_int = integer.copy()
-            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
-            date_str = year_int + '/' + integer + '/' + integer
-
-            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
-        """
-        msg = kwargs.get("message", "failed user-defined condition")
-        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
-        for fn in fns:
-            def pa(s,l,t):
-                if not bool(_trim_arity(fn)(s,l,t)):
-                    raise exc_type(s,l,msg)
-            self.parseAction.append(pa)
-        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
-        return self
-
-    def setFailAction( self, fn ):
-        """Define action to perform if parsing fails at this expression.
-           Fail action fn is a callable function that takes the arguments
-           C{fn(s,loc,expr,err)} where:
-            - s = string being parsed
-            - loc = location where expression match was attempted and failed
-            - expr = the parse expression that failed
-            - err = the exception thrown
-           The function returns no value.  It may throw C{L{ParseFatalException}}
-           if it is desired to stop parsing immediately."""
-        self.failAction = fn
-        return self
-
-    def _skipIgnorables( self, instring, loc ):
-        exprsFound = True
-        while exprsFound:
-            exprsFound = False
-            for e in self.ignoreExprs:
-                try:
-                    while 1:
-                        loc,dummy = e._parse( instring, loc )
-                        exprsFound = True
-                except ParseException:
-                    pass
-        return loc
-
-    def preParse( self, instring, loc ):
-        if self.ignoreExprs:
-            loc = self._skipIgnorables( instring, loc )
-
-        if self.skipWhitespace:
-            wt = self.whiteChars
-            instrlen = len(instring)
-            while loc < instrlen and instring[loc] in wt:
-                loc += 1
-
-        return loc
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        return loc, []
-
-    def postParse( self, instring, loc, tokenlist ):
-        return tokenlist
-
-    #~ @profile
-    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
-        debugging = ( self.debug ) #and doActions )
-
-        if debugging or self.failAction:
-            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
-            if (self.debugActions[0] ):
-                self.debugActions[0]( instring, loc, self )
-            if callPreParse and self.callPreparse:
-                preloc = self.preParse( instring, loc )
-            else:
-                preloc = loc
-            tokensStart = preloc
-            try:
-                try:
-                    loc,tokens = self.parseImpl( instring, preloc, doActions )
-                except IndexError:
-                    raise ParseException( instring, len(instring), self.errmsg, self )
-            except ParseBaseException as err:
-                #~ print ("Exception raised:", err)
-                if self.debugActions[2]:
-                    self.debugActions[2]( instring, tokensStart, self, err )
-                if self.failAction:
-                    self.failAction( instring, tokensStart, self, err )
-                raise
-        else:
-            if callPreParse and self.callPreparse:
-                preloc = self.preParse( instring, loc )
-            else:
-                preloc = loc
-            tokensStart = preloc
-            if self.mayIndexError or preloc >= len(instring):
-                try:
-                    loc,tokens = self.parseImpl( instring, preloc, doActions )
-                except IndexError:
-                    raise ParseException( instring, len(instring), self.errmsg, self )
-            else:
-                loc,tokens = self.parseImpl( instring, preloc, doActions )
-
-        tokens = self.postParse( instring, loc, tokens )
-
-        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
-        if self.parseAction and (doActions or self.callDuringTry):
-            if debugging:
-                try:
-                    for fn in self.parseAction:
-                        tokens = fn( instring, tokensStart, retTokens )
-                        if tokens is not None:
-                            retTokens = ParseResults( tokens,
-                                                      self.resultsName,
-                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
-                                                      modal=self.modalResults )
-                except ParseBaseException as err:
-                    #~ print "Exception raised in user parse action:", err
-                    if (self.debugActions[2] ):
-                        self.debugActions[2]( instring, tokensStart, self, err )
-                    raise
-            else:
-                for fn in self.parseAction:
-                    tokens = fn( instring, tokensStart, retTokens )
-                    if tokens is not None:
-                        retTokens = ParseResults( tokens,
-                                                  self.resultsName,
-                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
-                                                  modal=self.modalResults )
-        if debugging:
-            #~ print ("Matched",self,"->",retTokens.asList())
-            if (self.debugActions[1] ):
-                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
-
-        return loc, retTokens
-
-    def tryParse( self, instring, loc ):
-        try:
-            return self._parse( instring, loc, doActions=False )[0]
-        except ParseFatalException:
-            raise ParseException( instring, loc, self.errmsg, self)
-    
-    def canParseNext(self, instring, loc):
-        try:
-            self.tryParse(instring, loc)
-        except (ParseException, IndexError):
-            return False
-        else:
-            return True
-
-    class _UnboundedCache(object):
-        def __init__(self):
-            cache = {}
-            self.not_in_cache = not_in_cache = object()
-
-            def get(self, key):
-                return cache.get(key, not_in_cache)
-
-            def set(self, key, value):
-                cache[key] = value
-
-            def clear(self):
-                cache.clear()
-                
-            def cache_len(self):
-                return len(cache)
-
-            self.get = types.MethodType(get, self)
-            self.set = types.MethodType(set, self)
-            self.clear = types.MethodType(clear, self)
-            self.__len__ = types.MethodType(cache_len, self)
-
-    if _OrderedDict is not None:
-        class _FifoCache(object):
-            def __init__(self, size):
-                self.not_in_cache = not_in_cache = object()
-
-                cache = _OrderedDict()
-
-                def get(self, key):
-                    return cache.get(key, not_in_cache)
-
-                def set(self, key, value):
-                    cache[key] = value
-                    while len(cache) > size:
-                        try:
-                            cache.popitem(False)
-                        except KeyError:
-                            pass
-
-                def clear(self):
-                    cache.clear()
-
-                def cache_len(self):
-                    return len(cache)
-
-                self.get = types.MethodType(get, self)
-                self.set = types.MethodType(set, self)
-                self.clear = types.MethodType(clear, self)
-                self.__len__ = types.MethodType(cache_len, self)
-
-    else:
-        class _FifoCache(object):
-            def __init__(self, size):
-                self.not_in_cache = not_in_cache = object()
-
-                cache = {}
-                key_fifo = collections.deque([], size)
-
-                def get(self, key):
-                    return cache.get(key, not_in_cache)
-
-                def set(self, key, value):
-                    cache[key] = value
-                    while len(key_fifo) > size:
-                        cache.pop(key_fifo.popleft(), None)
-                    key_fifo.append(key)
-
-                def clear(self):
-                    cache.clear()
-                    key_fifo.clear()
-
-                def cache_len(self):
-                    return len(cache)
-
-                self.get = types.MethodType(get, self)
-                self.set = types.MethodType(set, self)
-                self.clear = types.MethodType(clear, self)
-                self.__len__ = types.MethodType(cache_len, self)
-
-    # argument cache for optimizing repeated calls when backtracking through recursive expressions
-    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
-    packrat_cache_lock = RLock()
-    packrat_cache_stats = [0, 0]
-
-    # this method gets repeatedly called during backtracking with the same arguments -
-    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
-    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
-        HIT, MISS = 0, 1
-        lookup = (self, instring, loc, callPreParse, doActions)
-        with ParserElement.packrat_cache_lock:
-            cache = ParserElement.packrat_cache
-            value = cache.get(lookup)
-            if value is cache.not_in_cache:
-                ParserElement.packrat_cache_stats[MISS] += 1
-                try:
-                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
-                except ParseBaseException as pe:
-                    # cache a copy of the exception, without the traceback
-                    cache.set(lookup, pe.__class__(*pe.args))
-                    raise
-                else:
-                    cache.set(lookup, (value[0], value[1].copy()))
-                    return value
-            else:
-                ParserElement.packrat_cache_stats[HIT] += 1
-                if isinstance(value, Exception):
-                    raise value
-                return (value[0], value[1].copy())
-
-    _parse = _parseNoCache
-
-    @staticmethod
-    def resetCache():
-        ParserElement.packrat_cache.clear()
-        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
-
-    _packratEnabled = False
-    @staticmethod
-    def enablePackrat(cache_size_limit=128):
-        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
-           Repeated parse attempts at the same string location (which happens
-           often in many complex grammars) can immediately return a cached value,
-           instead of re-executing parsing/validating code.  Memoizing is done of
-           both valid results and parsing exceptions.
-           
-           Parameters:
-            - cache_size_limit - (default=C{128}) - if an integer value is provided
-              will limit the size of the packrat cache; if None is passed, then
-              the cache size will be unbounded; if 0 is passed, the cache will
-              be effectively disabled.
-            
-           This speedup may break existing programs that use parse actions that
-           have side-effects.  For this reason, packrat parsing is disabled when
-           you first import pyparsing.  To activate the packrat feature, your
-           program must call the class method C{ParserElement.enablePackrat()}.  If
-           your program uses C{psyco} to "compile as you go", you must call
-           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
-           Python will crash.  For best results, call C{enablePackrat()} immediately
-           after importing pyparsing.
-           
-           Example::
-               import pyparsing
-               pyparsing.ParserElement.enablePackrat()
-        """
-        if not ParserElement._packratEnabled:
-            ParserElement._packratEnabled = True
-            if cache_size_limit is None:
-                ParserElement.packrat_cache = ParserElement._UnboundedCache()
-            else:
-                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
-            ParserElement._parse = ParserElement._parseCache
-
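-    # A minimal usage sketch: enable packrat memoization once, right after
-    # import and before building or running any grammar:
-    #
-    #   import pyparsing
-    #   pyparsing.ParserElement.enablePackrat()      # FIFO cache bounded at 128 entries
-    #   pyparsing.ParserElement.enablePackrat(None)  # unbounded cache instead
-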
-    def parseString( self, instring, parseAll=False ):
-        """
-        Execute the parse expression with the given string.
-        This is the main interface to the client code, once the complete
-        expression has been built.
-
-        If you want the grammar to require that the entire input string be
-        successfully parsed, then set C{parseAll} to True (equivalent to ending
-        the grammar with C{L{StringEnd()}}).
-
-        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
-        in order to report proper column numbers in parse actions.
-        If the input string contains tabs and
-        the grammar uses parse actions that use the C{loc} argument to index into the
-        string being parsed, you can ensure you have a consistent view of the input
-        string by:
-         - calling C{parseWithTabs} on your grammar before calling C{parseString}
-           (see L{I{parseWithTabs}})
-         - define your parse action using the full C{(s,loc,toks)} signature, and
-           reference the input string using the parse action's C{s} argument
-         - explicitly expand the tabs in your input string before calling
-           C{parseString}
-        
-        Example::
-            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
-            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
-        """
-        ParserElement.resetCache()
-        if not self.streamlined:
-            self.streamline()
-            #~ self.saveAsList = True
-        for e in self.ignoreExprs:
-            e.streamline()
-        if not self.keepTabs:
-            instring = instring.expandtabs()
-        try:
-            loc, tokens = self._parse( instring, 0 )
-            if parseAll:
-                loc = self.preParse( instring, loc )
-                se = Empty() + StringEnd()
-                se._parse( instring, loc )
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-        else:
-            return tokens
-
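-    # A minimal usage sketch contrasting default and parseAll parsing (assumes
-    # Word and alphas from this module):
-    #
-    #   greet = Word(alphas) + ',' + Word(alphas) + '!'
-    #   greet.parseString("Hello, World!")                       # -> ['Hello', ',', 'World', '!']
-    #   greet.parseString("Hello, World! etc.", parseAll=True)   # -> raises ParseException
-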
-    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
-        """
-        Scan the input string for expression matches.  Each match will return the
-        matching tokens, start location, and end location.  May be called with optional
-        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
-        C{overlap} is specified, then overlapping matches will be reported.
-
-        Note that the start and end locations are reported relative to the string
-        being parsed.  See L{I{parseString}} for more information on parsing
-        strings with embedded tabs.
-
-        Example::
-            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
-            print(source)
-            for tokens,start,end in Word(alphas).scanString(source):
-                print(' '*start + '^'*(end-start))
-                print(' '*start + tokens[0])
-        
-        prints::
-        
-            sldjf123lsdjjkf345sldkjf879lkjsfd987
-            ^^^^^
-            sldjf
-                    ^^^^^^^
-                    lsdjjkf
-                              ^^^^^^
-                              sldkjf
-                                       ^^^^^^
-                                       lkjsfd
-        """
-        if not self.streamlined:
-            self.streamline()
-        for e in self.ignoreExprs:
-            e.streamline()
-
-        if not self.keepTabs:
-            instring = _ustr(instring).expandtabs()
-        instrlen = len(instring)
-        loc = 0
-        preparseFn = self.preParse
-        parseFn = self._parse
-        ParserElement.resetCache()
-        matches = 0
-        try:
-            while loc <= instrlen and matches < maxMatches:
-                try:
-                    preloc = preparseFn( instring, loc )
-                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
-                except ParseException:
-                    loc = preloc+1
-                else:
-                    if nextLoc > loc:
-                        matches += 1
-                        yield tokens, preloc, nextLoc
-                        if overlap:
-                            nextloc = preparseFn( instring, loc )
-                            if nextloc > loc:
-                                loc = nextLoc
-                            else:
-                                loc += 1
-                        else:
-                            loc = nextLoc
-                    else:
-                        loc = preloc+1
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
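-    # A minimal usage sketch: scanString is a generator, so matches can be
-    # consumed lazily, and maxMatches bounds the scan (assumes Word and nums):
-    #
-    #   for tokens, start, end in Word(nums).scanString("a1b22c333", maxMatches=2):
-    #       print(tokens, start, end)   # -> ['1'] 1 2, then ['22'] 3 5
-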
-    def transformString( self, instring ):
-        """
-        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
-        be returned from a parse action.  To use C{transformString}, define a grammar and
-        attach a parse action to it that modifies the returned token list.
-        Invoking C{transformString()} on a target string will then scan for matches,
-        and replace the matched text patterns according to the logic in the parse
-        action.  C{transformString()} returns the resulting transformed string.
-        
-        Example::
-            wd = Word(alphas)
-            wd.setParseAction(lambda toks: toks[0].title())
-            
-            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
-        Prints::
-            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
-        """
-        out = []
-        lastE = 0
-        # force preservation of s, to minimize unwanted transformation of string, and to
-        # keep string locs straight between transformString and scanString
-        self.keepTabs = True
-        try:
-            for t,s,e in self.scanString( instring ):
-                out.append( instring[lastE:s] )
-                if t:
-                    if isinstance(t,ParseResults):
-                        out += t.asList()
-                    elif isinstance(t,list):
-                        out += t
-                    else:
-                        out.append(t)
-                lastE = e
-            out.append(instring[lastE:])
-            out = [o for o in out if o]
-            return "".join(map(_ustr,_flatten(out)))
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
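-    # A minimal usage sketch: whatever a parse action returns replaces the
-    # matched text in the output string (assumes Regex from this module):
-    #
-    #   ssn = Regex(r"\d{3}-\d{2}-\d{4}").setParseAction(lambda t: "***-**-****")
-    #   ssn.transformString("num: 123-45-6789")   # -> 'num: ***-**-****'
-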
-    def searchString( self, instring, maxMatches=_MAX_INT ):
-        """
-        Another extension to C{L{scanString}}, simplifying the access to the tokens found
-        to match the given parse expression.  May be called with optional
-        C{maxMatches} argument, to clip searching after 'n' matches are found.
-        
-        Example::
-            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
-            cap_word = Word(alphas.upper(), alphas.lower())
-            
-            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
-
-            # the sum() builtin can be used to merge results into a single ParseResults object
-            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
-        prints::
-            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
-            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
-        """
-        try:
-            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
-    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
-        """
-        Generator method to split a string using the given expression as a separator.
-        May be called with optional C{maxsplit} argument, to limit the number of splits;
-        and the optional C{includeSeparators} argument (default=C{False}), if the separating
-        matching text should be included in the split results.
-        
-        Example::        
-            punc = oneOf(list(".,;:/-!?"))
-            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
-        prints::
-            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
-        """
-        splits = 0
-        last = 0
-        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
-            yield instring[last:s]
-            if includeSeparators:
-                yield t[0]
-            last = e
-        yield instring[last:]
-
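-    # A minimal usage sketch: like str.split, but the separator is a parse
-    # expression; maxsplit limits the number of splits (assumes Literal):
-    #
-    #   comma = Literal(",")
-    #   list(comma.split("a,b,c", maxsplit=1))   # -> ['a', 'b,c']
-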
-    def __add__(self, other ):
-        """
-        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
-        converts them to L{Literal}s by default.
-        
-        Example::
-            greet = Word(alphas) + "," + Word(alphas) + "!"
-            hello = "Hello, World!"
-            print (hello, "->", greet.parseString(hello))
-        Prints::
-            Hello, World! -> ['Hello', ',', 'World', '!']
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return And( [ self, other ] )
-
-    def __radd__(self, other ):
-        """
-        Implementation of + operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other + self
-
-    def __sub__(self, other):
-        """
-        Implementation of - operator, returns C{L{And}} with error stop
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return self + And._ErrorStop() + other
-
-    def __rsub__(self, other ):
-        """
-        Implementation of - operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other - self
-
-    def __mul__(self,other):
-        """
-        Implementation of * operator, allows use of C{expr * 3} in place of
-        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
-        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
-        may also include C{None} as in:
-         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
-              to C{expr*n + L{ZeroOrMore}(expr)}
-              (read as "at least n instances of C{expr}")
-         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
-              (read as "0 to n instances of C{expr}")
-         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
-         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
-
-        Note that C{expr*(None,n)} does not raise an exception if
-        more than n exprs exist in the input stream; that is,
-        C{expr*(None,n)} does not enforce a maximum number of expr
-        occurrences.  If this behavior is desired, then write
-        C{expr*(None,n) + ~expr}
-        """
-        if isinstance(other,int):
-            minElements, optElements = other,0
-        elif isinstance(other,tuple):
-            other = (other + (None, None))[:2]
-            if other[0] is None:
-                other = (0, other[1])
-            if isinstance(other[0],int) and other[1] is None:
-                if other[0] == 0:
-                    return ZeroOrMore(self)
-                if other[0] == 1:
-                    return OneOrMore(self)
-                else:
-                    return self*other[0] + ZeroOrMore(self)
-            elif isinstance(other[0],int) and isinstance(other[1],int):
-                minElements, optElements = other
-                optElements -= minElements
-            else:
-                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
-        else:
-            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
-
-        if minElements < 0:
-            raise ValueError("cannot multiply ParserElement by negative value")
-        if optElements < 0:
-            raise ValueError("second tuple value must be greater or equal to first tuple value")
-        if minElements == optElements == 0:
-            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
-
-        if (optElements):
-            def makeOptionalList(n):
-                if n>1:
-                    return Optional(self + makeOptionalList(n-1))
-                else:
-                    return Optional(self)
-            if minElements:
-                if minElements == 1:
-                    ret = self + makeOptionalList(optElements)
-                else:
-                    ret = And([self]*minElements) + makeOptionalList(optElements)
-            else:
-                ret = makeOptionalList(optElements)
-        else:
-            if minElements == 1:
-                ret = self
-            else:
-                ret = And([self]*minElements)
-        return ret
-
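-    # A minimal sketch of the tuple forms accepted by the * operator (assumes
-    # Word and alphas from this module):
-    #
-    #   ch = Word(alphas, exact=1)
-    #   two_or_three = ch*(2,3)      # 2 to 3 occurrences
-    #   two_or_more  = ch*(2,None)   # at least 2
-    #   up_to_three  = ch*(None,3)   # 0 to 3 (a 4th occurrence is not rejected)
-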
-    def __rmul__(self, other):
-        return self.__mul__(other)
-
-    def __or__(self, other ):
-        """
-        Implementation of | operator - returns C{L{MatchFirst}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return MatchFirst( [ self, other ] )
-
-    def __ror__(self, other ):
-        """
-        Implementation of | operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other | self
-
-    def __xor__(self, other ):
-        """
-        Implementation of ^ operator - returns C{L{Or}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return Or( [ self, other ] )
-
-    def __rxor__(self, other ):
-        """
-        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other ^ self
-
-    def __and__(self, other ):
-        """
-        Implementation of & operator - returns C{L{Each}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return Each( [ self, other ] )
-
-    def __rand__(self, other ):
-        """
-        Implementation of & operator when left operand is not a C{L{ParserElement}}
-        """
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        if not isinstance( other, ParserElement ):
-            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
-                    SyntaxWarning, stacklevel=2)
-            return None
-        return other & self
-
-    def __invert__( self ):
-        """
-        Implementation of ~ operator - returns C{L{NotAny}}
-        """
-        return NotAny( self )
-
-    def __call__(self, name=None):
-        """
-        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
-        
-        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
-        passed as C{True}.
-           
-        If C{name} is omitted, same as calling C{L{copy}}.
-
-        Example::
-            # these are equivalent
-            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
-            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
-        """
-        if name is not None:
-            return self.setResultsName(name)
-        else:
-            return self.copy()
-
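-    # A minimal usage sketch: named results set via __call__ can be read back
-    # as attributes of the returned ParseResults (assumes Word, alphas, nums):
-    #
-    #   key_value = Word(alphas)("key") + '=' + Word(nums)("value")
-    #   r = key_value.parseString("answer=42")
-    #   print(r.key, r.value)   # -> answer 42
-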
-    def suppress( self ):
-        """
-        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
-        cluttering up returned output.
-        """
-        return Suppress( self )
-
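-    # A minimal usage sketch: suppressed punctuation is matched but kept out
-    # of the results (assumes Word, alphas, ZeroOrMore, Suppress):
-    #
-    #   csv_row = Word(alphas) + ZeroOrMore(Suppress(',') + Word(alphas))
-    #   csv_row.parseString("a,b,c")   # -> ['a', 'b', 'c']
-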
-    def leaveWhitespace( self ):
-        """
-        Disables the skipping of whitespace before matching the characters in the
-        C{ParserElement}'s defined pattern.  This is normally only used internally by
-        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-        """
-        self.skipWhitespace = False
-        return self
-
-    def setWhitespaceChars( self, chars ):
-        """
-        Overrides the default whitespace chars
-        """
-        self.skipWhitespace = True
-        self.whiteChars = chars
-        self.copyDefaultWhiteChars = False
-        return self
-
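-    # A minimal usage sketch: restricting whitespace to spaces and tabs makes
-    # newlines significant, the usual basis for line-oriented grammars
-    # (assumes Word, nums, OneOrMore, LineEnd from this module):
-    #
-    #   value = Word(nums).setWhitespaceChars(" \t")
-    #   number_line = OneOrMore(value) + LineEnd().suppress()
-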
-    def parseWithTabs( self ):
-        """
-        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
-        Must be called before C{parseString} when the input grammar contains elements that
-        match C{<TAB>} characters.
-        """
-        self.keepTabs = True
-        return self
-
-    def ignore( self, other ):
-        """
-        Define expression to be ignored (e.g., comments) while doing pattern
-        matching; may be called repeatedly, to define multiple comment or other
-        ignorable patterns.
-        
-        Example::
-            patt = OneOrMore(Word(alphas))
-            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
-            
-            patt.ignore(cStyleComment)
-            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
-        """
-        if isinstance(other, basestring):
-            other = Suppress(other)
-
-        if isinstance( other, Suppress ):
-            if other not in self.ignoreExprs:
-                self.ignoreExprs.append(other)
-        else:
-            self.ignoreExprs.append( Suppress( other.copy() ) )
-        return self
-
-    def setDebugActions( self, startAction, successAction, exceptionAction ):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        """
-        self.debugActions = (startAction or _defaultStartDebugAction,
-                             successAction or _defaultSuccessDebugAction,
-                             exceptionAction or _defaultExceptionDebugAction)
-        self.debug = True
-        return self
-
-    def setDebug( self, flag=True ):
-        """
-        Enable display of debugging messages while doing pattern matching.
-        Set C{flag} to True to enable, False to disable.
-
-        Example::
-            wd = Word(alphas).setName("alphaword")
-            integer = Word(nums).setName("numword")
-            term = wd | integer
-            
-            # turn on debugging for wd
-            wd.setDebug()
-
-            OneOrMore(term).parseString("abc 123 xyz 890")
-        
-        prints::
-            Match alphaword at loc 0(1,1)
-            Matched alphaword -> ['abc']
-            Match alphaword at loc 3(1,4)
-            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
-            Match alphaword at loc 7(1,8)
-            Matched alphaword -> ['xyz']
-            Match alphaword at loc 11(1,12)
-            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
-            Match alphaword at loc 15(1,16)
-            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
-        The output shown is that produced by the default debug actions - custom debug actions can be
-        specified using L{setDebugActions}. Prior to attempting
-        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
-        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
-        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
-        which makes debugging and exception messages easier to understand - for instance, the default
-        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
-        """
-        if flag:
-            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
-        else:
-            self.debug = False
-        return self
-
-    def __str__( self ):
-        return self.name
-
-    def __repr__( self ):
-        return _ustr(self)
-
-    def streamline( self ):
-        self.streamlined = True
-        self.strRepr = None
-        return self
-
-    def checkRecursion( self, parseElementList ):
-        pass
-
-    def validate( self, validateTrace=[] ):
-        """
-        Check defined expressions for valid structure, check for infinite recursive definitions.
-        """
-        self.checkRecursion( [] )
-
-    def parseFile( self, file_or_filename, parseAll=False ):
-        """
-        Execute the parse expression on the given file or filename.
-        If a filename is specified (instead of a file object),
-        the entire file is opened, read, and closed before parsing.
-        """
-        try:
-            file_contents = file_or_filename.read()
-        except AttributeError:
-            with open(file_or_filename, "r") as f:
-                file_contents = f.read()
-        try:
-            return self.parseString(file_contents, parseAll)
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc
-
-    def __eq__(self,other):
-        if isinstance(other, ParserElement):
-            return self is other or vars(self) == vars(other)
-        elif isinstance(other, basestring):
-            return self.matches(other)
-        else:
-            return super(ParserElement,self).__eq__(other)
-
-    def __ne__(self,other):
-        return not (self == other)
-
-    def __hash__(self):
-        return hash(id(self))
-
-    def __req__(self,other):
-        return self == other
-
-    def __rne__(self,other):
-        return not (self == other)
-
-    def matches(self, testString, parseAll=True):
-        """
-        Method for quick testing of a parser against a test string. Good for simple 
-        inline microtests of sub expressions while building up larger parser.
-           
-        Parameters:
-         - testString - to test against this expression for a match
-         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
-            
-        Example::
-            expr = Word(nums)
-            assert expr.matches("100")
-        """
-        try:
-            self.parseString(_ustr(testString), parseAll=parseAll)
-            return True
-        except ParseBaseException:
-            return False
-                
-    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
-        """
-        Execute the parse expression on a series of test strings, showing each
-        test, the parsed results or where the parse failed. Quick and easy way to
-        run a parse expression against a list of sample strings.
-           
-        Parameters:
-         - tests - a list of separate test strings, or a multiline string of test strings
-         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
-         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
-              string; pass None to disable comment filtering
-         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
-              if False, only dump nested list
-         - printResults - (default=C{True}) prints test output to stdout
-         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
-
-        Returns: a (success, results) tuple, where success indicates that all tests succeeded
-        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
-        test's output
-        
-        Example::
-            number_expr = pyparsing_common.number.copy()
-
-            result = number_expr.runTests('''
-                # unsigned integer
-                100
-                # negative integer
-                -100
-                # float with scientific notation
-                6.02e23
-                # integer with scientific notation
-                1e-12
-                ''')
-            print("Success" if result[0] else "Failed!")
-
-            result = number_expr.runTests('''
-                # stray character
-                100Z
-                # missing leading digit before '.'
-                -.100
-                # too many '.'
-                3.14.159
-                ''', failureTests=True)
-            print("Success" if result[0] else "Failed!")
-        prints::
-            # unsigned integer
-            100
-            [100]
-
-            # negative integer
-            -100
-            [-100]
-
-            # float with scientific notation
-            6.02e23
-            [6.02e+23]
-
-            # integer with scientific notation
-            1e-12
-            [1e-12]
-
-            Success
-            
-            # stray character
-            100Z
-               ^
-            FAIL: Expected end of text (at char 3), (line:1, col:4)
-
-            # missing leading digit before '.'
-            -.100
-            ^
-            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
-            # too many '.'
-            3.14.159
-                ^
-            FAIL: Expected end of text (at char 4), (line:1, col:5)
-
-            Success
-
-        Each test string must be on a single line. If you want to test a string that spans multiple
-        lines, create a test like this::
-
-            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
-        
-        (Note that this is a raw string literal, you must include the leading 'r'.)
-        """
-        if isinstance(tests, basestring):
-            tests = list(map(str.strip, tests.rstrip().splitlines()))
-        if isinstance(comment, basestring):
-            comment = Literal(comment)
-        allResults = []
-        comments = []
-        success = True
-        for t in tests:
-            if comment is not None and comment.matches(t, False) or comments and not t:
-                comments.append(t)
-                continue
-            if not t:
-                continue
-            out = ['\n'.join(comments), t]
-            comments = []
-            try:
-                t = t.replace(r'\n','\n')
-                result = self.parseString(t, parseAll=parseAll)
-                out.append(result.dump(full=fullDump))
-                success = success and not failureTests
-            except ParseBaseException as pe:
-                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
-                if '\n' in t:
-                    out.append(line(pe.loc, t))
-                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
-                else:
-                    out.append(' '*pe.loc + '^' + fatal)
-                out.append("FAIL: " + str(pe))
-                success = success and failureTests
-                result = pe
-            except Exception as exc:
-                out.append("FAIL-EXCEPTION: " + str(exc))
-                success = success and failureTests
-                result = exc
-
-            if printResults:
-                if fullDump:
-                    out.append('')
-                print('\n'.join(out))
-
-            allResults.append((t, result))
-        
-        return success, allResults
-
-        
-class Token(ParserElement):
-    """
-    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
-    """
-    def __init__( self ):
-        super(Token,self).__init__( savelist=False )
-
-
-class Empty(Token):
-    """
-    An empty token; it will always match.
-    """
-    def __init__( self ):
-        super(Empty,self).__init__()
-        self.name = "Empty"
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-
-class NoMatch(Token):
-    """
-    A token that will never match.
-    """
-    def __init__( self ):
-        super(NoMatch,self).__init__()
-        self.name = "NoMatch"
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.errmsg = "Unmatchable token"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
-    """
-    Token to exactly match a specified string.
-    
-    Example::
-        Literal('blah').parseString('blah')  # -> ['blah']
-        Literal('blah').parseString('blahfooblah')  # -> ['blah']
-        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
-    
-    For case-insensitive matching, use L{CaselessLiteral}.
-    
-    For keyword matching (force word break before and after the matched string),
-    use L{Keyword} or L{CaselessKeyword}.
-    """
-    def __init__( self, matchString ):
-        super(Literal,self).__init__()
-        self.match = matchString
-        self.matchLen = len(matchString)
-        try:
-            self.firstMatchChar = matchString[0]
-        except IndexError:
-            warnings.warn("null string passed to Literal; use Empty() instead",
-                            SyntaxWarning, stacklevel=2)
-            self.__class__ = Empty
-        self.name = '"%s"' % _ustr(self.match)
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-
-    # Performance tuning: this routine gets called a *lot*
-    # if this is a single character match string  and the first character matches,
-    # short-circuit as quickly as possible, and avoid calling startswith
-    #~ @profile
-    def parseImpl( self, instring, loc, doActions=True ):
-        if (instring[loc] == self.firstMatchChar and
-            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
-            return loc+self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-_L = Literal
-ParserElement._literalStringClass = Literal
-
-class Keyword(Token):
-    """
-    Token to exactly match a specified string as a keyword, that is, it must be
-    immediately preceded and followed by non-keyword characters.  Compare with C{L{Literal}}:
-     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
-     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
-    Accepts two optional constructor arguments in addition to the keyword string:
-     - C{identChars} is a string of characters that would be valid identifier characters,
-          defaulting to all alphanumerics + "_" and "$"
-     - C{caseless} allows case-insensitive matching, default is C{False}.
-       
-    Example::
-        Keyword("start").parseString("start")  # -> ['start']
-        Keyword("start").parseString("starting")  # -> Exception
-
-    For case-insensitive matching, use L{CaselessKeyword}.
-    """
-    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
-
-    def __init__( self, matchString, identChars=None, caseless=False ):
-        super(Keyword,self).__init__()
-        if identChars is None:
-            identChars = Keyword.DEFAULT_KEYWORD_CHARS
-        self.match = matchString
-        self.matchLen = len(matchString)
-        try:
-            self.firstMatchChar = matchString[0]
-        except IndexError:
-            warnings.warn("null string passed to Keyword; use Empty() instead",
-                            SyntaxWarning, stacklevel=2)
-        self.name = '"%s"' % self.match
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-        self.caseless = caseless
-        if caseless:
-            self.caselessmatch = matchString.upper()
-            identChars = identChars.upper()
-        self.identChars = set(identChars)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.caseless:
-            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
-                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
-                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
-                return loc+self.matchLen, self.match
-        else:
-            if (instring[loc] == self.firstMatchChar and
-                (self.matchLen==1 or instring.startswith(self.match,loc)) and
-                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
-                (loc == 0 or instring[loc-1] not in self.identChars) ):
-                return loc+self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-    def copy(self):
-        c = super(Keyword,self).copy()
-        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
-        return c
-
-    @staticmethod
-    def setDefaultKeywordChars( chars ):
-        """Overrides the default Keyword chars
-        """
-        Keyword.DEFAULT_KEYWORD_CHARS = chars
-
-class CaselessLiteral(Literal):
-    """
-    Token to match a specified string, ignoring case of letters.
-    Note: the matched results will always be in the case of the given
-    match string, NOT the case of the input text.
-
-    Example::
-        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
-        
-    (Contrast with example for L{CaselessKeyword}.)
-    """
-    def __init__( self, matchString ):
-        super(CaselessLiteral,self).__init__( matchString.upper() )
-        # Preserve the defining literal.
-        self.returnString = matchString
-        self.name = "'%s'" % self.returnString
-        self.errmsg = "Expected " + self.name
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if instring[ loc:loc+self.matchLen ].upper() == self.match:
-            return loc+self.matchLen, self.returnString
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class CaselessKeyword(Keyword):
-    """
-    Caseless version of L{Keyword}.
-
-    Example::
-        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
-        
-    (Contrast with example for L{CaselessLiteral}.)
-    """
-    def __init__( self, matchString, identChars=None ):
-        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
-             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
-            return loc+self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class CloseMatch(Token):
-    """
-    A variation on L{Literal} which matches "close" matches, that is, 
-    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
-     - C{match_string} - string to be matched
-     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
-    
-    The results from a successful parse will contain the matched text from the input string and the following named results:
-     - C{mismatches} - a list of the positions within the match_string where mismatches were found
-     - C{original} - the original match_string used to compare against the input string
-    
-    If C{mismatches} is an empty list, then the match was an exact match.
-    
-    Example::
-        patt = CloseMatch("ATCATCGAATGGA")
-        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
-        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
-        # exact match
-        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
-        # close match allowing up to 2 mismatches
-        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
-        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
-    """
-    def __init__(self, match_string, maxMismatches=1):
-        super(CloseMatch,self).__init__()
-        self.name = match_string
-        self.match_string = match_string
-        self.maxMismatches = maxMismatches
-        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
-        self.mayIndexError = False
-        self.mayReturnEmpty = False
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        start = loc
-        instrlen = len(instring)
-        maxloc = start + len(self.match_string)
-
-        if maxloc <= instrlen:
-            match_string = self.match_string
-            match_stringloc = 0
-            mismatches = []
-            maxMismatches = self.maxMismatches
-
-            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
-                src,mat = s_m
-                if src != mat:
-                    mismatches.append(match_stringloc)
-                    if len(mismatches) > maxMismatches:
-                        break
-            else:
-                # the mismatch scan indexes the slice, so rebase the end on 'start'
-                loc = start + match_stringloc + 1
-                results = ParseResults([instring[start:loc]])
-                results['original'] = self.match_string
-                results['mismatches'] = mismatches
-                return loc, results
-
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
-    """
-    Token for matching words composed of allowed character sets.
-    Defined with string containing all allowed initial characters,
-    an optional string containing allowed body characters (if omitted,
-    defaults to the initial character set), and an optional minimum,
-    maximum, and/or exact length.  The default value for C{min} is 1 (a
-    minimum value < 1 is not valid); the default values for C{max} and C{exact}
-    are 0, meaning no maximum or exact length restriction. An optional
-    C{excludeChars} parameter can list characters that might be found in 
-    the input C{bodyChars} string; useful to define a word of all printables
-    except for one or two characters, for instance.
-    
-    L{srange} is useful for defining custom character set strings for defining 
-    C{Word} expressions, using range notation from regular expression character sets.
-    
-    A common mistake is to use C{Word} to match a specific literal string, as in 
-    C{Word("Address")}. Remember that C{Word} uses the string argument to define
-    I{sets} of matchable characters. This expression would match "Add", "AAA",
-    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
-    To match an exact literal string, use L{Literal} or L{Keyword}.
-
-    pyparsing includes helper strings for building Words:
-     - L{alphas}
-     - L{nums}
-     - L{alphanums}
-     - L{hexnums}
-     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
-     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
-     - L{printables} (any non-whitespace character)
-
-    Example::
-        # a word composed of digits
-        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-        
-        # a word with a leading capital, and zero or more lowercase
-        capital_word = Word(alphas.upper(), alphas.lower())
-
-        # hostnames are alphanumeric, with leading alpha, and '-'
-        hostname = Word(alphas, alphanums+'-')
-        
-        # roman numeral (not a strict parser, accepts invalid mix of characters)
-        roman = Word("IVXLCDM")
-        
-        # any string of non-whitespace characters, except for ','
-        csv_value = Word(printables, excludeChars=",")
-    """
-    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
-        super(Word,self).__init__()
-        if excludeChars:
-            initChars = ''.join(c for c in initChars if c not in excludeChars)
-            if bodyChars:
-                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
-        self.initCharsOrig = initChars
-        self.initChars = set(initChars)
-        if bodyChars :
-            self.bodyCharsOrig = bodyChars
-            self.bodyChars = set(bodyChars)
-        else:
-            self.bodyCharsOrig = initChars
-            self.bodyChars = set(initChars)
-
-        self.maxSpecified = max > 0
-
-        if min < 1:
-            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.asKeyword = asKeyword
-
-        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
-            if self.bodyCharsOrig == self.initCharsOrig:
-                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
-            elif len(self.initCharsOrig) == 1:
-                self.reString = "%s[%s]*" % \
-                                      (re.escape(self.initCharsOrig),
-                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
-            else:
-                self.reString = "[%s][%s]*" % \
-                                      (_escapeRegexRangeChars(self.initCharsOrig),
-                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
-            if self.asKeyword:
-                self.reString = r"\b"+self.reString+r"\b"
-            try:
-                self.re = re.compile( self.reString )
-            except Exception:
-                self.re = None
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.re:
-            result = self.re.match(instring,loc)
-            if not result:
-                raise ParseException(instring, loc, self.errmsg, self)
-
-            loc = result.end()
-            return loc, result.group()
-
-        if not(instring[ loc ] in self.initChars):
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        instrlen = len(instring)
-        bodychars = self.bodyChars
-        maxloc = start + self.maxLen
-        maxloc = min( maxloc, instrlen )
-        while loc < maxloc and instring[loc] in bodychars:
-            loc += 1
-
-        throwException = False
-        if loc - start < self.minLen:
-            throwException = True
-        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
-            throwException = True
-        if self.asKeyword:
-            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
-                throwException = True
-
-        if throwException:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-    def __str__( self ):
-        try:
-            return super(Word,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            def charsAsStr(s):
-                if len(s)>4:
-                    return s[:4]+"..."
-                else:
-                    return s
-
-            if ( self.initCharsOrig != self.bodyCharsOrig ):
-                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
-            else:
-                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
-
-        return self.strRepr
-
-
-class Regex(Token):
-    r"""
-    Token for matching strings that match a given regular expression.
-    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
-    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
-    named parse results.
-
-    Example::
-        realnum = Regex(r"[+-]?\d+\.\d*")
-        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-    """
-    compiledREtype = type(re.compile("[A-Z]"))
-    def __init__( self, pattern, flags=0):
-        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
-        super(Regex,self).__init__()
-
-        if isinstance(pattern, basestring):
-            if not pattern:
-                warnings.warn("null string passed to Regex; use Empty() instead",
-                        SyntaxWarning, stacklevel=2)
-
-            self.pattern = pattern
-            self.flags = flags
-
-            try:
-                self.re = re.compile(self.pattern, self.flags)
-                self.reString = self.pattern
-            except sre_constants.error:
-                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
-                    SyntaxWarning, stacklevel=2)
-                raise
-
-        elif isinstance(pattern, Regex.compiledREtype):
-            self.re = pattern
-            self.pattern = \
-            self.reString = str(pattern)
-            self.flags = flags
-            
-        else:
-            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        result = self.re.match(instring,loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        d = result.groupdict()
-        ret = ParseResults(result.group())
-        if d:
-            for k in d:
-                ret[k] = d[k]
-        return loc,ret
-
-    def __str__( self ):
-        try:
-            return super(Regex,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            self.strRepr = "Re:(%s)" % repr(self.pattern)
-
-        return self.strRepr
-
-
-class QuotedString(Token):
-    r"""
-    Token for matching strings that are delimited by quoting characters.
-    
-    Defined with the following parameters:
-        - quoteChar - string of one or more characters defining the quote delimiting string
-        - escChar - character to escape quotes, typically backslash (default=C{None})
-        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
-        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
-        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
-        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
-        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
-
-    Example::
-        qs = QuotedString('"')
-        print(qs.searchString('lsjdf "This is the quote" sldjf'))
-        complex_qs = QuotedString('{{', endQuoteChar='}}')
-        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
-        sql_qs = QuotedString('"', escQuote='""')
-        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
-    prints::
-        [['This is the quote']]
-        [['This is the "quote"']]
-        [['This is the quote with "embedded" quotes']]
-    """
-    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
-        super(QuotedString,self).__init__()
-
-        # remove white space from quote chars - won't work anyway
-        quoteChar = quoteChar.strip()
-        if not quoteChar:
-            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
-            raise SyntaxError()
-
-        if endQuoteChar is None:
-            endQuoteChar = quoteChar
-        else:
-            endQuoteChar = endQuoteChar.strip()
-            if not endQuoteChar:
-                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
-                raise SyntaxError()
-
-        self.quoteChar = quoteChar
-        self.quoteCharLen = len(quoteChar)
-        self.firstQuoteChar = quoteChar[0]
-        self.endQuoteChar = endQuoteChar
-        self.endQuoteCharLen = len(endQuoteChar)
-        self.escChar = escChar
-        self.escQuote = escQuote
-        self.unquoteResults = unquoteResults
-        self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
-        if multiline:
-            self.flags = re.MULTILINE | re.DOTALL
-            self.pattern = r'%s(?:[^%s%s]' % \
-                ( re.escape(self.quoteChar),
-                  _escapeRegexRangeChars(self.endQuoteChar[0]),
-                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
-        else:
-            self.flags = 0
-            self.pattern = r'%s(?:[^%s\n\r%s]' % \
-                ( re.escape(self.quoteChar),
-                  _escapeRegexRangeChars(self.endQuoteChar[0]),
-                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
-        if len(self.endQuoteChar) > 1:
-            self.pattern += (
-                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
-                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
-                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
-                )
-        if escQuote:
-            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
-        if escChar:
-            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
-            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
-        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
-
-        try:
-            self.re = re.compile(self.pattern, self.flags)
-            self.reString = self.pattern
-        except sre_constants.error:
-            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
-                SyntaxWarning, stacklevel=2)
-            raise
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result.group()
-
-        if self.unquoteResults:
-
-            # strip off quotes
-            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
-
-            if isinstance(ret,basestring):
-                # replace escaped whitespace
-                if '\\' in ret and self.convertWhitespaceEscapes:
-                    ws_map = {
-                        r'\t' : '\t',
-                        r'\n' : '\n',
-                        r'\f' : '\f',
-                        r'\r' : '\r',
-                    }
-                    for wslit,wschar in ws_map.items():
-                        ret = ret.replace(wslit, wschar)
-
-                # replace escaped characters
-                if self.escChar:
-                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
-                # replace escaped quotes
-                if self.escQuote:
-                    ret = ret.replace(self.escQuote, self.endQuoteChar)
-
-        return loc, ret
-
-    def __str__( self ):
-        try:
-            return super(QuotedString,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
-
-        return self.strRepr
-
-
-class CharsNotIn(Token):
-    """
-    Token for matching words composed of characters I{not} in a given set (will
-    include whitespace in matched characters if not listed in the provided exclusion set - see example).
-    Defined with string containing all disallowed characters, and an optional
-    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
-    minimum value < 1 is not valid); the default values for C{max} and C{exact}
-    are 0, meaning no maximum or exact length restriction.
-
-    Example::
-        # define a comma-separated-value as anything that is not a ','
-        csv_value = CharsNotIn(',')
-        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
-    prints::
-        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
-    """
-    def __init__( self, notChars, min=1, max=0, exact=0 ):
-        super(CharsNotIn,self).__init__()
-        self.skipWhitespace = False
-        self.notChars = notChars
-
-        if min < 1:
-            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.name = _ustr(self)
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = ( self.minLen == 0 )
-        self.mayIndexError = False
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if instring[loc] in self.notChars:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        notchars = self.notChars
-        maxlen = min( start+self.maxLen, len(instring) )
-        while loc < maxlen and \
-              (instring[loc] not in notchars):
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-    def __str__( self ):
-        try:
-            return super(CharsNotIn, self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None:
-            if len(self.notChars) > 4:
-                self.strRepr = "!W:(%s...)" % self.notChars[:4]
-            else:
-                self.strRepr = "!W:(%s)" % self.notChars
-
-        return self.strRepr
-
-class White(Token):
-    """
-    Special matching class for matching whitespace.  Normally, whitespace is ignored
-    by pyparsing grammars.  This class is included when some whitespace structures
-    are significant.  Define with a string containing the whitespace characters to be
-    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
-    as defined for the C{L{Word}} class.
-    """
-    whiteStrs = {
-        " " : "",
-        "\t": "",
-        "\n": "",
-        "\r": "",
-        "\f": "",
-        }
-    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
-        super(White,self).__init__()
-        self.matchWhite = ws
-        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
-        #~ self.leaveWhitespace()
-        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
-        self.mayReturnEmpty = True
-        self.errmsg = "Expected " + self.name
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if not(instring[ loc ] in self.matchWhite):
-            raise ParseException(instring, loc, self.errmsg, self)
-        start = loc
-        loc += 1
-        maxloc = start + self.maxLen
-        maxloc = min( maxloc, len(instring) )
-        while loc < maxloc and instring[loc] in self.matchWhite:
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
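-# Usage sketch (illustrative only; assumes the classes defined in this module):
-# White makes a run of whitespace significant, e.g. exactly four spaces of
-# indentation.
-#
-#   indent = White(" ", exact=4)
-#   stmt = indent + Word(alphas)
-#   print(stmt.parseString("    foo"))   # -> ['    ', 'foo']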
-
-class _PositionToken(Token):
-    def __init__( self ):
-        super(_PositionToken,self).__init__()
-        self.name=self.__class__.__name__
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-class GoToColumn(_PositionToken):
-    """
-    Token to advance to a specific column of input text; useful for tabular report scraping.
-    """
-    def __init__( self, colno ):
-        super(GoToColumn,self).__init__()
-        self.col = colno
-
-    def preParse( self, instring, loc ):
-        if col(loc,instring) != self.col:
-            instrlen = len(instring)
-            if self.ignoreExprs:
-                loc = self._skipIgnorables( instring, loc )
-            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
-                loc += 1
-        return loc
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        thiscol = col( loc, instring )
-        if thiscol > self.col:
-            raise ParseException( instring, loc, "Text not in expected column", self )
-        newloc = loc + self.col - thiscol
-        ret = instring[ loc: newloc ]
-        return newloc, ret
-
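-# Usage sketch (illustrative only; assumes the classes defined in this module):
-# jump to column 10 of a fixed-width report line and read the field there.
-#
-#   field = GoToColumn(10) + Word(nums, exact=4)
-#   print(field.parseString("Revenue  2023")[-1])   # -> '2023'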
-
-class LineStart(_PositionToken):
-    """
-    Matches if current position is at the beginning of a line within the parse string
-    
-    Example::
-    
-        test = '''\
-        AAA this line
-        AAA and this line
-          AAA but not this one
-        B AAA and definitely not this one
-        '''
-
-        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
-            print(t)
-    
-    Prints::
-        ['AAA', ' this line']
-        ['AAA', ' and this line']    
-
-    """
-    def __init__( self ):
-        super(LineStart,self).__init__()
-        self.errmsg = "Expected start of line"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if col(loc, instring) == 1:
-            return loc, []
-        raise ParseException(instring, loc, self.errmsg, self)
-
-class LineEnd(_PositionToken):
-    """
-    Matches if current position is at the end of a line within the parse string
-    """
-    def __init__( self ):
-        super(LineEnd,self).__init__()
-        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
-        self.errmsg = "Expected end of line"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if loc<len(instring):
-            if instring[loc] == "\n":
-                return loc+1, "\n"
-            else:
-                raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc+1, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-class StringStart(_PositionToken):
-    """
-    Matches if current position is at the beginning of the parse string
-    """
-    def __init__( self ):
-        super(StringStart,self).__init__()
-        self.errmsg = "Expected start of text"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if loc != 0:
-            # see if entire string up to here is just whitespace and ignoreables
-            if loc != self.preParse( instring, 0 ):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-class StringEnd(_PositionToken):
-    """
-    Matches if current position is at the end of the parse string
-    """
-    def __init__( self ):
-        super(StringEnd,self).__init__()
-        self.errmsg = "Expected end of text"
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if loc < len(instring):
-            raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc+1, []
-        elif loc > len(instring):
-            return loc, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-class WordStart(_PositionToken):
-    """
-    Matches if the current position is at the beginning of a Word, and
-    is not preceded by any character in a given set of C{wordChars}
-    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
-    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
-    the string being parsed, or at the beginning of a line.
-    """
-    def __init__(self, wordChars = printables):
-        super(WordStart,self).__init__()
-        self.wordChars = set(wordChars)
-        self.errmsg = "Not at the start of a word"
-
-    def parseImpl(self, instring, loc, doActions=True ):
-        if loc != 0:
-            if (instring[loc-1] in self.wordChars or
-                instring[loc] not in self.wordChars):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
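-# Usage sketch (illustrative only; assumes the classes defined in this module):
-# WordStart keeps 'ball' from matching inside 'football'.
-#
-#   expr = WordStart(alphanums) + Literal('ball')
-#   print(expr.searchString("football ball"))   # -> [['ball']]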
-class WordEnd(_PositionToken):
-    """
-    Matches if the current position is at the end of a Word, and
-    is not followed by any character in a given set of C{wordChars}
-    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
-    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
-    the string being parsed, or at the end of a line.
-    """
-    def __init__(self, wordChars = printables):
-        super(WordEnd,self).__init__()
-        self.wordChars = set(wordChars)
-        self.skipWhitespace = False
-        self.errmsg = "Not at the end of a word"
-
-    def parseImpl(self, instring, loc, doActions=True ):
-        instrlen = len(instring)
-        if instrlen>0 and loc<instrlen:
-            if (instring[loc] in self.wordChars or
-                instring[loc-1] not in self.wordChars):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-
-class Or(ParseExpression):
-    """
-    Requires that at least one C{ParseExpression} is found.
-    If two expressions match, the expression that matches the longest string will be used.
-    May be constructed using the C{'^'} operator.
-    """
-    def __init__( self, exprs, savelist = False ):
-        super(Or,self).__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        maxExcLoc = -1
-        maxException = None
-        matches = []
-        for e in self.exprs:
-            try:
-                loc2 = e.tryParse( instring, loc )
-            except ParseException as err:
-                err.__traceback__ = None
-                if err.loc > maxExcLoc:
-                    maxException = err
-                    maxExcLoc = err.loc
-            except IndexError:
-                if len(instring) > maxExcLoc:
-                    maxException = ParseException(instring,len(instring),e.errmsg,self)
-                    maxExcLoc = len(instring)
-            else:
-                # save match among all matches, to retry longest to shortest
-                matches.append((loc2, e))
-
-        if matches:
-            matches.sort(key=lambda x: -x[0])
-            for _,e in matches:
-                try:
-                    return e._parse( instring, loc, doActions )
-                except ParseException as err:
-                    err.__traceback__ = None
-                    if err.loc > maxExcLoc:
-                        maxException = err
-                        maxExcLoc = err.loc
-
-        if maxException is not None:
-            maxException.msg = self.errmsg
-            raise maxException
-        else:
-            raise ParseException(instring, loc, "no defined alternatives to match", self)
-
-
-    def __ixor__(self, other ):
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        return self.append( other ) #Or( [ self, other ] )
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
-
-        return self.strRepr
-
-    def checkRecursion( self, parseElementList ):
-        subRecCheckList = parseElementList[:] + [ self ]
-        for e in self.exprs:
-            e.checkRecursion( subRecCheckList )
-
-
-class MatchFirst(ParseExpression):
-    """
-    Requires that at least one C{ParseExpression} is found.
-    If two expressions match, the first one listed is the one that will match.
-    May be constructed using the C{'|'} operator.
-
-    Example::
-        # construct MatchFirst using '|' operator
-        
-        # watch the order of expressions to match
-        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
-        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
-        # put more selective expression first
-        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
-        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
-    """
-    def __init__( self, exprs, savelist = False ):
-        super(MatchFirst,self).__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        maxExcLoc = -1
-        maxException = None
-        for e in self.exprs:
-            try:
-                ret = e._parse( instring, loc, doActions )
-                return ret
-            except ParseException as err:
-                if err.loc > maxExcLoc:
-                    maxException = err
-                    maxExcLoc = err.loc
-            except IndexError:
-                if len(instring) > maxExcLoc:
-                    maxException = ParseException(instring,len(instring),e.errmsg,self)
-                    maxExcLoc = len(instring)
-
-        # only got here if no expression matched, raise exception for match that made it the furthest
-        else:
-            if maxException is not None:
-                maxException.msg = self.errmsg
-                raise maxException
-            else:
-                raise ParseException(instring, loc, "no defined alternatives to match", self)
-
-    def __ior__(self, other ):
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass( other )
-        return self.append( other ) #MatchFirst( [ self, other ] )
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
-
-        return self.strRepr
-
-    def checkRecursion( self, parseElementList ):
-        subRecCheckList = parseElementList[:] + [ self ]
-        for e in self.exprs:
-            e.checkRecursion( subRecCheckList )
-
-
-class Each(ParseExpression):
-    """
-    Requires all given C{ParseExpression}s to be found, but in any order.
-    Expressions may be separated by whitespace.
-    May be constructed using the C{'&'} operator.
-
-    Example::
-        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
-        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
-        integer = Word(nums)
-        shape_attr = "shape:" + shape_type("shape")
-        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
-        color_attr = "color:" + color("color")
-        size_attr = "size:" + integer("size")
-
-        # use Each (using operator '&') to accept attributes in any order 
-        # (shape and posn are required, color and size are optional)
-        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
-
-        shape_spec.runTests('''
-            shape: SQUARE color: BLACK posn: 100, 120
-            shape: CIRCLE size: 50 color: BLUE posn: 50,80
-            color:GREEN size:20 shape:TRIANGLE posn:20,40
-            '''
-            )
-    prints::
-        shape: SQUARE color: BLACK posn: 100, 120
-        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
-        - color: BLACK
-        - posn: ['100', ',', '120']
-          - x: 100
-          - y: 120
-        - shape: SQUARE
-
-
-        shape: CIRCLE size: 50 color: BLUE posn: 50,80
-        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
-        - color: BLUE
-        - posn: ['50', ',', '80']
-          - x: 50
-          - y: 80
-        - shape: CIRCLE
-        - size: 50
-
-
-        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
-        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
-        - color: GREEN
-        - posn: ['20', ',', '40']
-          - x: 20
-          - y: 40
-        - shape: TRIANGLE
-        - size: 20
-    """
-    def __init__( self, exprs, savelist = True ):
-        super(Each,self).__init__(exprs, savelist)
-        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
-        self.skipWhitespace = True
-        self.initExprGroups = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.initExprGroups:
-            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
-            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
-            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
-            self.optionals = opt1 + opt2
-            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
-            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
-            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
-            self.required += self.multirequired
-            self.initExprGroups = False
-        tmpLoc = loc
-        tmpReqd = self.required[:]
-        tmpOpt  = self.optionals[:]
-        matchOrder = []
-
-        keepMatching = True
-        while keepMatching:
-            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
-            failed = []
-            for e in tmpExprs:
-                try:
-                    tmpLoc = e.tryParse( instring, tmpLoc )
-                except ParseException:
-                    failed.append(e)
-                else:
-                    matchOrder.append(self.opt1map.get(id(e),e))
-                    if e in tmpReqd:
-                        tmpReqd.remove(e)
-                    elif e in tmpOpt:
-                        tmpOpt.remove(e)
-            if len(failed) == len(tmpExprs):
-                keepMatching = False
-
-        if tmpReqd:
-            missing = ", ".join(_ustr(e) for e in tmpReqd)
-            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
-
-        # add any unmatched Optionals, in case they have default values defined
-        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
-
-        resultlist = []
-        for e in matchOrder:
-            loc,results = e._parse(instring,loc,doActions)
-            resultlist.append(results)
-
-        finalResults = sum(resultlist, ParseResults([]))
-        return loc, finalResults
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
-
-        return self.strRepr
-
-    def checkRecursion( self, parseElementList ):
-        subRecCheckList = parseElementList[:] + [ self ]
-        for e in self.exprs:
-            e.checkRecursion( subRecCheckList )
-
-
-class ParseElementEnhance(ParserElement):
-    """
-    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
-    """
-    def __init__( self, expr, savelist=False ):
-        super(ParseElementEnhance,self).__init__(savelist)
-        if isinstance( expr, basestring ):
-            if issubclass(ParserElement._literalStringClass, Token):
-                expr = ParserElement._literalStringClass(expr)
-            else:
-                expr = ParserElement._literalStringClass(Literal(expr))
-        self.expr = expr
-        self.strRepr = None
-        if expr is not None:
-            self.mayIndexError = expr.mayIndexError
-            self.mayReturnEmpty = expr.mayReturnEmpty
-            self.setWhitespaceChars( expr.whiteChars )
-            self.skipWhitespace = expr.skipWhitespace
-            self.saveAsList = expr.saveAsList
-            self.callPreparse = expr.callPreparse
-            self.ignoreExprs.extend(expr.ignoreExprs)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.expr is not None:
-            return self.expr._parse( instring, loc, doActions, callPreParse=False )
-        else:
-            raise ParseException("",loc,self.errmsg,self)
-
-    def leaveWhitespace( self ):
-        self.skipWhitespace = False
-        self.expr = self.expr.copy()
-        if self.expr is not None:
-            self.expr.leaveWhitespace()
-        return self
-
-    def ignore( self, other ):
-        if isinstance( other, Suppress ):
-            if other not in self.ignoreExprs:
-                super( ParseElementEnhance, self).ignore( other )
-                if self.expr is not None:
-                    self.expr.ignore( self.ignoreExprs[-1] )
-        else:
-            super( ParseElementEnhance, self).ignore( other )
-            if self.expr is not None:
-                self.expr.ignore( self.ignoreExprs[-1] )
-        return self
-
-    def streamline( self ):
-        super(ParseElementEnhance,self).streamline()
-        if self.expr is not None:
-            self.expr.streamline()
-        return self
-
-    def checkRecursion( self, parseElementList ):
-        if self in parseElementList:
-            raise RecursiveGrammarException( parseElementList+[self] )
-        subRecCheckList = parseElementList[:] + [ self ]
-        if self.expr is not None:
-            self.expr.checkRecursion( subRecCheckList )
-
-    def validate( self, validateTrace=[] ):
-        tmp = validateTrace[:]+[self]
-        if self.expr is not None:
-            self.expr.validate(tmp)
-        self.checkRecursion( [] )
-
-    def __str__( self ):
-        try:
-            return super(ParseElementEnhance,self).__str__()
-        except Exception:
-            pass
-
-        if self.strRepr is None and self.expr is not None:
-            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
-        return self.strRepr
-
-
-class FollowedBy(ParseElementEnhance):
-    """
-    Lookahead matching of the given parse expression.  C{FollowedBy}
-    does I{not} advance the parsing position within the input string, it only
-    verifies that the specified parse expression matches at the current
-    position.  C{FollowedBy} always returns a null token list.
-
-    Example::
-        # use FollowedBy to match a label only if it is followed by a ':'
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        
-        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
-    prints::
-        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
-    """
-    def __init__( self, expr ):
-        super(FollowedBy,self).__init__(expr)
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        self.expr.tryParse( instring, loc )
-        return loc, []
-
-
-class NotAny(ParseElementEnhance):
-    """
-    Lookahead to disallow matching with the given parse expression.  C{NotAny}
-    does I{not} advance the parsing position within the input string, it only
-    verifies that the specified parse expression does I{not} match at the current
-    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
-    always returns a null token list.  May be constructed using the '~' operator.
-
-    Example::
-        
-    """
-    def __init__( self, expr ):
-        super(NotAny,self).__init__(expr)
-        #~ self.leaveWhitespace()
-        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
-        self.mayReturnEmpty = True
-        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.expr.canParseNext(instring, loc):
-            raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "~{" + _ustr(self.expr) + "}"
-
-        return self.strRepr
-
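-# Usage sketch (illustrative only, since the docstring example above is empty):
-# use '~' (NotAny) to keep a keyword from being read as an identifier.
-#
-#   ident = ~Keyword('end') + Word(alphas)
-#   print(ident.parseString("endpoint"))   # -> ['endpoint']
-#   # ident.parseString("end") raises ParseException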
-class _MultipleMatch(ParseElementEnhance):
-    def __init__( self, expr, stopOn=None):
-        super(_MultipleMatch, self).__init__(expr)
-        self.saveAsList = True
-        ender = stopOn
-        if isinstance(ender, basestring):
-            ender = ParserElement._literalStringClass(ender)
-        self.not_ender = ~ender if ender is not None else None
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        self_expr_parse = self.expr._parse
-        self_skip_ignorables = self._skipIgnorables
-        check_ender = self.not_ender is not None
-        if check_ender:
-            try_not_ender = self.not_ender.tryParse
-        
-        # must be at least one (but first see if we are the stopOn sentinel;
-        # if so, fail)
-        if check_ender:
-            try_not_ender(instring, loc)
-        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
-        try:
-            hasIgnoreExprs = (not not self.ignoreExprs)
-            while 1:
-                if check_ender:
-                    try_not_ender(instring, loc)
-                if hasIgnoreExprs:
-                    preloc = self_skip_ignorables( instring, loc )
-                else:
-                    preloc = loc
-                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
-                if tmptokens or tmptokens.haskeys():
-                    tokens += tmptokens
-        except (ParseException,IndexError):
-            pass
-
-        return loc, tokens
-        
-class OneOrMore(_MultipleMatch):
-    """
-    Repetition of one or more of the given expression.
-    
-    Parameters:
-     - expr - expression that must match one or more times
-     - stopOn - (default=C{None}) - expression for a terminating sentinel
-          (only required if the sentinel would ordinarily match the repetition 
-          expression)          
-
-    Example::
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: BLACK"
-        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
-        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-        
-        # could also be written as
-        (attr_expr * (1,)).parseString(text).pprint()
-    """
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + _ustr(self.expr) + "}..."
-
-        return self.strRepr
-
-class ZeroOrMore(_MultipleMatch):
-    """
-    Optional repetition of zero or more of the given expression.
-    
-    Parameters:
-     - expr - expression that must match zero or more times
-     - stopOn - (default=C{None}) - expression for a terminating sentinel
-          (only required if the sentinel would ordinarily match the repetition 
-          expression)          
-
-    Example: similar to L{OneOrMore}
-    """
-    def __init__( self, expr, stopOn=None):
-        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
-        self.mayReturnEmpty = True
-        
-    def parseImpl( self, instring, loc, doActions=True ):
-        try:
-            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
-        except (ParseException,IndexError):
-            return loc, []
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "[" + _ustr(self.expr) + "]..."
-
-        return self.strRepr
-
-class _NullToken(object):
-    def __bool__(self):
-        return False
-    __nonzero__ = __bool__
-    def __str__(self):
-        return ""
-
-_optionalNotMatched = _NullToken()
-class Optional(ParseElementEnhance):
-    """
-    Optional matching of the given expression.
-
-    Parameters:
-     - expr - expression that must match zero or more times
-     - default (optional) - value to be returned if the optional expression is not found.
-
-    Example::
-        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
-        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
-        zip.runTests('''
-            # traditional ZIP code
-            12345
-            
-            # ZIP+4 form
-            12101-0001
-            
-            # invalid ZIP
-            98765-
-            ''')
-    prints::
-        # traditional ZIP code
-        12345
-        ['12345']
-
-        # ZIP+4 form
-        12101-0001
-        ['12101-0001']
-
-        # invalid ZIP
-        98765-
-             ^
-        FAIL: Expected end of text (at char 5), (line:1, col:6)
-    """
-    def __init__( self, expr, default=_optionalNotMatched ):
-        super(Optional,self).__init__( expr, savelist=False )
-        self.saveAsList = self.expr.saveAsList
-        self.defaultValue = default
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        try:
-            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
-        except (ParseException,IndexError):
-            if self.defaultValue is not _optionalNotMatched:
-                if self.expr.resultsName:
-                    tokens = ParseResults([ self.defaultValue ])
-                    tokens[self.expr.resultsName] = self.defaultValue
-                else:
-                    tokens = [ self.defaultValue ]
-            else:
-                tokens = []
-        return loc, tokens
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "[" + _ustr(self.expr) + "]"
-
-        return self.strRepr
-
-class SkipTo(ParseElementEnhance):
-    """
-    Token for skipping over all undefined text until the matched expression is found.
-
-    Parameters:
-     - expr - target expression marking the end of the data to be skipped
-     - include - (default=C{False}) if True, the target expression is also parsed 
-          (the skipped text and target expression are returned as a 2-element list).
-     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
-          comments) that might contain false matches to the target expression
-     - failOn - (default=C{None}) define expressions that are not allowed to be 
-          included in the skipped test; if found before the target expression is found, 
-          the SkipTo is not a match
-
-    Example::
-        report = '''
-            Outstanding Issues Report - 1 Jan 2000
-
-               # | Severity | Description                               |  Days Open
-            -----+----------+-------------------------------------------+-----------
-             101 | Critical | Intermittent system crash                 |          6
-              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
-              79 | Minor    | System slow when running too many reports |         47
-            '''
-        integer = Word(nums)
-        SEP = Suppress('|')
-        # use SkipTo to simply match everything up until the next SEP
-        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
-        # - parse action will call token.strip() for each matched token, i.e., the description body
-        string_data = SkipTo(SEP, ignore=quotedString)
-        string_data.setParseAction(tokenMap(str.strip))
-        ticket_expr = (integer("issue_num") + SEP 
-                      + string_data("sev") + SEP 
-                      + string_data("desc") + SEP 
-                      + integer("days_open"))
-        
-        for tkt in ticket_expr.searchString(report):
-            print(tkt.dump())
-    prints::
-        ['101', 'Critical', 'Intermittent system crash', '6']
-        - days_open: 6
-        - desc: Intermittent system crash
-        - issue_num: 101
-        - sev: Critical
-        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
-        - days_open: 14
-        - desc: Spelling error on Login ('log|n')
-        - issue_num: 94
-        - sev: Cosmetic
-        ['79', 'Minor', 'System slow when running too many reports', '47']
-        - days_open: 47
-        - desc: System slow when running too many reports
-        - issue_num: 79
-        - sev: Minor
-    """
-    def __init__( self, other, include=False, ignore=None, failOn=None ):
-        super( SkipTo, self ).__init__( other )
-        self.ignoreExpr = ignore
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.includeMatch = include
-        self.asList = False
-        if isinstance(failOn, basestring):
-            self.failOn = ParserElement._literalStringClass(failOn)
-        else:
-            self.failOn = failOn
-        self.errmsg = "No match found for "+_ustr(self.expr)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        startloc = loc
-        instrlen = len(instring)
-        expr = self.expr
-        expr_parse = self.expr._parse
-        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
-        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-        
-        tmploc = loc
-        while tmploc <= instrlen:
-            if self_failOn_canParseNext is not None:
-                # break if failOn expression matches
-                if self_failOn_canParseNext(instring, tmploc):
-                    break
-                    
-            if self_ignoreExpr_tryParse is not None:
-                # advance past ignore expressions
-                while 1:
-                    try:
-                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
-                    except ParseBaseException:
-                        break
-            
-            try:
-                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
-            except (ParseException, IndexError):
-                # no match, advance loc in string
-                tmploc += 1
-            else:
-                # matched skipto expr, done
-                break
-
-        else:
-            # ran off the end of the input string without matching skipto expr, fail
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        # build up return values
-        loc = tmploc
-        skiptext = instring[startloc:loc]
-        skipresult = ParseResults(skiptext)
-        
-        if self.includeMatch:
-            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
-            skipresult += mat
-
-        return loc, skipresult
-
-class Forward(ParseElementEnhance):
-    """
-    Forward declaration of an expression to be defined later -
-    used for recursive grammars, such as algebraic infix notation.
-    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
-
-    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
-    Specifically, '|' has a lower precedence than '<<', so that::
-        fwdExpr << a | b | c
-    will actually be evaluated as::
-        (fwdExpr << a) | b | c
-    thereby leaving b and c out as parseable alternatives.  It is recommended that you
-    explicitly group the values inserted into the C{Forward}::
-        fwdExpr << (a | b | c)
-    Converting to use the '<<=' operator instead will avoid this problem.
-
-    See L{ParseResults.pprint} for an example of a recursive parser created using
-    C{Forward}.
-    """
-    def __init__( self, other=None ):
-        super(Forward,self).__init__( other, savelist=False )
-
-    def __lshift__( self, other ):
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass(other)
-        self.expr = other
-        self.strRepr = None
-        self.mayIndexError = self.expr.mayIndexError
-        self.mayReturnEmpty = self.expr.mayReturnEmpty
-        self.setWhitespaceChars( self.expr.whiteChars )
-        self.skipWhitespace = self.expr.skipWhitespace
-        self.saveAsList = self.expr.saveAsList
-        self.ignoreExprs.extend(self.expr.ignoreExprs)
-        return self
-        
-    def __ilshift__(self, other):
-        return self << other
-    
-    def leaveWhitespace( self ):
-        self.skipWhitespace = False
-        return self
-
-    def streamline( self ):
-        if not self.streamlined:
-            self.streamlined = True
-            if self.expr is not None:
-                self.expr.streamline()
-        return self
-
-    def validate( self, validateTrace=[] ):
-        if self not in validateTrace:
-            tmp = validateTrace[:]+[self]
-            if self.expr is not None:
-                self.expr.validate(tmp)
-        self.checkRecursion([])
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-        return self.__class__.__name__ + ": ..."
-
-        # stubbed out for now - creates awful memory and perf issues
-        self._revertClass = self.__class__
-        self.__class__ = _ForwardNoRecurse
-        try:
-            if self.expr is not None:
-                retString = _ustr(self.expr)
-            else:
-                retString = "None"
-        finally:
-            self.__class__ = self._revertClass
-        return self.__class__.__name__ + ": " + retString
-
-    def copy(self):
-        if self.expr is not None:
-            return super(Forward,self).copy()
-        else:
-            ret = Forward()
-            ret <<= self
-            return ret
-
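-# Usage sketch (illustrative only): a recursive grammar for nested
-# parenthesized groups of integers, using '<<=' as the docstring recommends.
-#
-#   expr = Forward()
-#   nested = Suppress('(') + ZeroOrMore(expr) + Suppress(')')
-#   expr <<= Word(nums) | Group(nested)
-#   print(expr.parseString("((1 2) 3)"))   # -> [[['1', '2'], '3']]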
-class _ForwardNoRecurse(Forward):
-    def __str__( self ):
-        return "..."
-
-class TokenConverter(ParseElementEnhance):
-    """
-    Abstract subclass of C{ParseExpression}, for converting parsed results.
-    """
-    def __init__( self, expr, savelist=False ):
-        super(TokenConverter,self).__init__( expr )#, savelist )
-        self.saveAsList = False
-
-class Combine(TokenConverter):
-    """
-    Converter to concatenate all matching tokens to a single string.
-    By default, the matching patterns must also be contiguous in the input string;
-    this can be disabled by specifying C{'adjacent=False'} in the constructor.
-
-    Example::
-        real = Word(nums) + '.' + Word(nums)
-        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
-        # will also erroneously match the following
-        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
-
-        real = Combine(Word(nums) + '.' + Word(nums))
-        print(real.parseString('3.1416')) # -> ['3.1416']
-        # no match when there are internal spaces
-        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
-    """
-    def __init__( self, expr, joinString="", adjacent=True ):
-        super(Combine,self).__init__( expr )
-        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
-        if adjacent:
-            self.leaveWhitespace()
-        self.adjacent = adjacent
-        self.skipWhitespace = True
-        self.joinString = joinString
-        self.callPreparse = True
-
-    def ignore( self, other ):
-        if self.adjacent:
-            ParserElement.ignore(self, other)
-        else:
-            super( Combine, self).ignore( other )
-        return self
-
-    def postParse( self, instring, loc, tokenlist ):
-        retToks = tokenlist.copy()
-        del retToks[:]
-        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
-
-        if self.resultsName and retToks.haskeys():
-            return [ retToks ]
-        else:
-            return retToks
-
-class Group(TokenConverter):
-    """
-    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
-
-    Example::
-        ident = Word(alphas)
-        num = Word(nums)
-        term = ident | num
-        func = ident + Optional(delimitedList(term))
-        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
-
-        func = ident + Group(Optional(delimitedList(term)))
-        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
-    """
-    def __init__( self, expr ):
-        super(Group,self).__init__( expr )
-        self.saveAsList = True
-
-    def postParse( self, instring, loc, tokenlist ):
-        return [ tokenlist ]
-
-class Dict(TokenConverter):
-    """
-    Converter to return a repetitive expression as a list, but also as a dictionary.
-    Each element can also be referenced using the first token in the expression as its key.
-    Useful for tabular report scraping when the first column can be used as an item key.
-
-    Example::
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        
-        # print attributes as plain groups
-        print(OneOrMore(attr_expr).parseString(text).dump())
-        
-        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
-        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
-        print(result.dump())
-        
-        # access named fields as dict entries, or output as dict
-        print(result['shape'])        
-        print(result.asDict())
-    prints::
-        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-    See more examples at L{ParseResults} of accessing fields by results name.
-    """
-    def __init__( self, expr ):
-        super(Dict,self).__init__( expr )
-        self.saveAsList = True
-
-    def postParse( self, instring, loc, tokenlist ):
-        for i,tok in enumerate(tokenlist):
-            if len(tok) == 0:
-                continue
-            ikey = tok[0]
-            if isinstance(ikey,int):
-                ikey = _ustr(tok[0]).strip()
-            if len(tok)==1:
-                tokenlist[ikey] = _ParseResultsWithOffset("",i)
-            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
-                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
-            else:
-                dictvalue = tok.copy() #ParseResults(i)
-                del dictvalue[0]
-                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
-                else:
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
-
-        if self.resultsName:
-            return [ tokenlist ]
-        else:
-            return tokenlist
-
-
-class Suppress(TokenConverter):
-    """
-    Converter for ignoring the results of a parsed expression.
-
-    Example::
-        source = "a, b, c,d"
-        wd = Word(alphas)
-        wd_list1 = wd + ZeroOrMore(',' + wd)
-        print(wd_list1.parseString(source))
-
-        # often, delimiters that are useful during parsing are just in the
-        # way afterward - use Suppress to keep them out of the parsed output
-        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
-        print(wd_list2.parseString(source))
-    prints::
-        ['a', ',', 'b', ',', 'c', ',', 'd']
-        ['a', 'b', 'c', 'd']
-    (See also L{delimitedList}.)
-    """
-    def postParse( self, instring, loc, tokenlist ):
-        return []
-
-    def suppress( self ):
-        return self
-
-
-class OnlyOnce(object):
-    """
-    Wrapper for parse actions, to ensure they are only called once.
-    """
-    def __init__(self, methodCall):
-        self.callable = _trim_arity(methodCall)
-        self.called = False
-    def __call__(self,s,l,t):
-        if not self.called:
-            results = self.callable(s,l,t)
-            self.called = True
-            return results
-        raise ParseException(s,l,"")
-    def reset(self):
-        self.called = False
-
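-# Usage sketch (illustrative only): after the first match fires the action,
-# later matches raise ParseException until reset() is called.
-#
-#   def announce(s, l, t):
-#       print("matched %s" % t)
-#   wd = Word(alphas).setParseAction(OnlyOnce(announce))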
-def traceParseAction(f):
-    """
-    Decorator for debugging parse actions. 
-    
-    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
-    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
-
-    Example::
-        wd = Word(alphas)
-
-        @traceParseAction
-        def remove_duplicate_chars(tokens):
-            return ''.join(sorted(set(''.join(tokens))))
-
-        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
-        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
-    prints::
-        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s,l,t = paArgs[-3:]
-        if len(paArgs)>3:
-            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
-        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
-        try:
-            ret = f(*paArgs)
-        except Exception as exc:
-            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
-            raise
-        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
-        return ret
-    try:
-        z.__name__ = f.__name__
-    except AttributeError:
-        z.__name__ = "z"
-    return z
-
-#
-# global helpers
-#
-def delimitedList( expr, delim=",", combine=False ):
-    """
-    Helper to define a delimited list of expressions - the delimiter defaults to ','.
-    By default, the list elements and delimiters can have intervening whitespace, and
-    comments, but this can be overridden by passing C{combine=True} in the constructor.
-    If C{combine} is set to C{True}, the matching tokens are returned as a single token
-    string, with the delimiters included; otherwise, the matching tokens are returned
-    as a list of tokens, with the delimiters suppressed.
-
-    Example::
-        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
-        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
-    """
-    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
-    if combine:
-        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
-    else:
-        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
-
-def countedArray( expr, intExpr=None ):
-    """
-    Helper to define a counted list of expressions.
-    This helper defines a pattern of the form::
-        integer expr expr expr...
-    where the leading integer tells how many expr expressions follow.
-    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
-    
-    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
-
-    Example::
-        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
-
-        # in this parser, the leading integer value is given in binary,
-        # '10' indicating that 2 values are in the array
-        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
-        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
-    """
-    arrayExpr = Forward()
-    def countFieldParseAction(s,l,t):
-        n = t[0]
-        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
-        return []
-    if intExpr is None:
-        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
-    else:
-        intExpr = intExpr.copy()
-    intExpr.setName("arrayLen")
-    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
-    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
-
-def _flatten(L):
-    ret = []
-    for i in L:
-        if isinstance(i,list):
-            ret.extend(_flatten(i))
-        else:
-            ret.append(i)
-    return ret
-
-def matchPreviousLiteral(expr):
-    """
-    Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks
-    for a 'repeat' of a previous expression.  For example::
-        first = Word(nums)
-        second = matchPreviousLiteral(first)
-        matchExpr = first + ":" + second
-    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
-    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
-    If this is not desired, use C{matchPreviousExpr}.
-    Do I{not} use with packrat parsing enabled.
-    """
-    rep = Forward()
-    def copyTokenToRepeater(s,l,t):
-        if t:
-            if len(t) == 1:
-                rep << t[0]
-            else:
-                # flatten t tokens
-                tflat = _flatten(t.asList())
-                rep << And(Literal(tt) for tt in tflat)
-        else:
-            rep << Empty()
-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-    rep.setName('(prev) ' + _ustr(expr))
-    return rep
-
-def matchPreviousExpr(expr):
-    """
-    Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks
-    for a 'repeat' of a previous expression.  For example::
-        first = Word(nums)
-        second = matchPreviousExpr(first)
-        matchExpr = first + ":" + second
-    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
-    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
-    the expressions are evaluated first, and then compared, so
-    C{"1"} is compared with C{"10"}.
-    Do I{not} use with packrat parsing enabled.
-    """
-    rep = Forward()
-    e2 = expr.copy()
-    rep <<= e2
-    def copyTokenToRepeater(s,l,t):
-        matchTokens = _flatten(t.asList())
-        def mustMatchTheseTokens(s,l,t):
-            theseTokens = _flatten(t.asList())
-            if  theseTokens != matchTokens:
-                raise ParseException("",0,"")
-        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-    rep.setName('(prev) ' + _ustr(expr))
-    return rep
-
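The literal-vs-expression distinction described in the two docstrings above can be seen side by side; a minimal sketch, assuming pyparsing 2.x (editorial example):

    from pyparsing import Word, nums, ParseException
    from pyparsing import matchPreviousLiteral, matchPreviousExpr

    first = Word(nums)
    by_literal = first + ":" + matchPreviousLiteral(first)
    by_expr    = first + ":" + matchPreviousExpr(first)

    print(by_literal.parseString("1:10"))   # matches the leading "1:1" -> ['1', ':', '1']
    try:
        by_expr.parseString("1:10")         # fails: "1" is compared with "10"
    except ParseException:
        print("no match, as expected")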
-def _escapeRegexRangeChars(s):
-    #~  escape these chars: ^-]
-    for c in r"\^-]":
-        s = s.replace(c,_bslash+c)
-    s = s.replace("\n",r"\n")
-    s = s.replace("\t",r"\t")
-    return _ustr(s)
-
-def oneOf( strs, caseless=False, useRegex=True ):
-    """
-    Helper to quickly define a set of alternative Literals, and makes sure to do
-    longest-first testing when there is a conflict, regardless of the input order,
-    but returns a C{L{MatchFirst}} for best performance.
-
-    Parameters:
-     - strs - a string of space-delimited literals, or a collection of string literals
-     - caseless - (default=C{False}) - treat all literals as caseless
-     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
-          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
-          if creating a C{Regex} raises an exception)
-
-    Example::
-        comp_oper = oneOf("< = > <= >= !=")
-        var = Word(alphas)
-        number = Word(nums)
-        term = var | number
-        comparison_expr = term + comp_oper + term
-        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
-    prints::
-        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
-    """
-    if caseless:
-        isequal = ( lambda a,b: a.upper() == b.upper() )
-        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
-        parseElementClass = CaselessLiteral
-    else:
-        isequal = ( lambda a,b: a == b )
-        masks = ( lambda a,b: b.startswith(a) )
-        parseElementClass = Literal
-
-    symbols = []
-    if isinstance(strs,basestring):
-        symbols = strs.split()
-    elif isinstance(strs, Iterable):
-        symbols = list(strs)
-    else:
-        warnings.warn("Invalid argument to oneOf, expected string or iterable",
-                SyntaxWarning, stacklevel=2)
-    if not symbols:
-        return NoMatch()
-
-    i = 0
-    while i < len(symbols)-1:
-        cur = symbols[i]
-        for j,other in enumerate(symbols[i+1:]):
-            if ( isequal(other, cur) ):
-                del symbols[i+j+1]
-                break
-            elif ( masks(cur, other) ):
-                del symbols[i+j+1]
-                symbols.insert(i,other)
-                cur = other
-                break
-        else:
-            i += 1
-
-    if not caseless and useRegex:
-        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
-        try:
-            if len(symbols)==len("".join(symbols)):
-                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
-            else:
-                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
-        except Exception:
-            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
-                    SyntaxWarning, stacklevel=2)
-
-
-    # last resort, just use MatchFirst
-    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
-
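The longest-first reordering is the whole point of this helper; a minimal sketch (editorial example, pyparsing 2.x assumed):

    from pyparsing import oneOf

    # "<=" is tested before "<" regardless of the order given here,
    # so the longer operator is never shadowed by its prefix
    op = oneOf("< > <= >= = !=")
    print(op.parseString("<="))   # -> ['<='], not ['<']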
-def dictOf( key, value ):
-    """
-    Helper to easily and clearly define a dictionary by specifying the respective patterns
-    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
-    in the proper order.  The key pattern can include delimiting markers or punctuation,
-    as long as they are suppressed, thereby leaving the significant key text.  The value
-    pattern can include named results, so that the C{Dict} results can include named token
-    fields.
-
-    Example::
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        print(OneOrMore(attr_expr).parseString(text).dump())
-        
-        attr_label = label
-        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
-
-        # similar to Dict, but simpler call format
-        result = dictOf(attr_label, attr_value).parseString(text)
-        print(result.dump())
-        print(result['shape'])
-        print(result.shape)  # object attribute access works too
-        print(result.asDict())
-    prints::
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        SQUARE
-        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
-    """
-    return Dict( ZeroOrMore( Group ( key + value ) ) )
-
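A compact sketch of the key/value split, with the delimiting '=' suppressed out of the key as the docstring suggests (editorial example, pyparsing 2.x assumed):

    from pyparsing import Word, alphas, nums, Suppress, dictOf

    key = Word(alphas) + Suppress("=")
    value = Word(nums)
    settings = dictOf(key, value).parseString("width=640 height=480")
    print(settings["width"], settings.height)   # -> 640 480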
-def originalTextFor(expr, asString=True):
-    """
-    Helper to return the original, untokenized text for a given expression.  Useful to
-    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
-    revert separate tokens with intervening whitespace back to the original matching
-    input text. By default, returns a string containing the original parsed text.
-       
-    If the optional C{asString} argument is passed as C{False}, then the return value is a 
-    C{L{ParseResults}} containing any results names that were originally matched, and a 
-    single token containing the original matched text from the input string.  So if 
-    the expression passed to C{L{originalTextFor}} contains expressions with defined
-    results names, you must set C{asString} to C{False} if you want to preserve those
-    results name values.
-
-    Example::
-        src = "this is test  bold text  normal text "
-        for tag in ("b","i"):
-            opener,closer = makeHTMLTags(tag)
-            patt = originalTextFor(opener + SkipTo(closer) + closer)
-            print(patt.searchString(src)[0])
-    prints::
-        ['<b> bold <i>text</i> </b>']
-        ['<i>text</i>']
-    """
-    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
-    endlocMarker = locMarker.copy()
-    endlocMarker.callPreparse = False
-    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
-    if asString:
-        extractText = lambda s,l,t: s[t._original_start:t._original_end]
-    else:
-        def extractText(s,l,t):
-            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
-    matchExpr.setParseAction(extractText)
-    matchExpr.ignoreExprs = expr.ignoreExprs
-    return matchExpr
-
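A minimal sketch of the C{asString} trade-off described above (editorial example, assuming pyparsing 2.x):

    from pyparsing import Word, alphas, nums, originalTextFor

    assign = Word(alphas)("lhs") + "=" + Word(nums)("rhs")
    src = "x   =   42"

    print(originalTextFor(assign).parseString(src))   # -> ['x   =   42']

    kept = originalTextFor(assign, asString=False).parseString(src)
    print(kept.lhs, kept.rhs)                         # names survive -> x 42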
-def ungroup(expr): 
-    """
-    Helper to undo pyparsing's default grouping of And expressions, even
-    if all but one are non-empty.
-    """
-    return TokenConverter(expr).setParseAction(lambda t:t[0])
-
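Since this helper ships without an example, a one-liner sketch (editorial example, pyparsing 2.x assumed):

    from pyparsing import Word, alphas, Group, ungroup

    grouped = Group(Word(alphas) + Word(alphas))
    print(grouped.parseString("hello world"))           # -> [['hello', 'world']]
    print(ungroup(grouped).parseString("hello world"))  # -> ['hello', 'world']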
-def locatedExpr(expr):
-    """
-    Helper to decorate a returned token with its starting and ending locations in the input string.
-    This helper adds the following results names:
-     - locn_start = location where matched expression begins
-     - locn_end = location where matched expression ends
-     - value = the actual parsed results
-
-    Be careful if the input text contains C{<TAB>} characters; you may want to call
-    C{L{ParserElement.parseWithTabs}}
-
-    Example::
-        wd = Word(alphas)
-        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
-            print(match)
-    prints::
-        [[0, 'ljsdf', 5]]
-        [[8, 'lksdjjf', 15]]
-        [[18, 'lkkjj', 23]]
-    """
-    locator = Empty().setParseAction(lambda s,l,t: l)
-    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
-
-
-# convenience constants for positional expressions
-empty       = Empty().setName("empty")
-lineStart   = LineStart().setName("lineStart")
-lineEnd     = LineEnd().setName("lineEnd")
-stringStart = StringStart().setName("stringStart")
-stringEnd   = StringEnd().setName("stringEnd")
-
-_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
-_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
-
-def srange(s):
-    r"""
-    Helper to easily define string ranges for use in Word construction.  Borrows
-    syntax from regexp '[]' string range definitions::
-        srange("[0-9]")   -> "0123456789"
-        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
-        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-    The input string must be enclosed in []'s, and the returned string is the expanded
-    character set joined into a single string.
-    The values enclosed in the []'s may be:
-     - a single character
-     - an escaped character with a leading backslash (such as C{\-} or C{\]})
-     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
-         (C{\0x##} is also supported for backwards compatibility) 
-     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
-     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
-     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
-    """
-    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
-    try:
-        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
-    except Exception:
-        return ""
-
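C{srange} only expands a character set; a brief sketch of feeding the result to C{Word} (editorial example, pyparsing 2.x assumed):

    from pyparsing import Word, srange

    first_chars = srange("[a-zA-Z_]")
    body_chars  = first_chars + srange("[0-9]")
    ident = Word(first_chars, body_chars)
    print(ident.parseString("snake_case_1"))   # -> ['snake_case_1']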
-def matchOnlyAtCol(n):
-    """
-    Helper method for defining parse actions that require matching at a specific
-    column in the input text.
-    """
-    def verifyCol(strg,locn,toks):
-        if col(locn,strg) != n:
-            raise ParseException(strg,locn,"matched token not at column %d" % n)
-    return verifyCol
-
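No example accompanies this helper; a minimal sketch of pinning a match to one column (editorial example, pyparsing 2.x; note C{col} is 1-based):

    from pyparsing import Word, nums, matchOnlyAtCol

    # accept a number only when it starts in column 9 of its line
    col9_num = Word(nums).setParseAction(matchOnlyAtCol(9))
    print(col9_num.parseString("        42"))   # eight spaces, column 9 -> ['42']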
-def replaceWith(replStr):
-    """
-    Helper method for common parse actions that simply return a literal value.  Especially
-    useful when used with C{L{transformString}()}.
-
-    Example::
-        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
-        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
-        term = na | num
-        
-        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s,l,t: [replStr]
-
-def removeQuotes(s,l,t):
-    """
-    Helper parse action for removing quotation marks from parsed quoted strings.
-
-    Example::
-        # by default, quotation marks are included in parsed results
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use removeQuotes to strip quotation marks from parsed results
-        quotedString.setParseAction(removeQuotes)
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-def tokenMap(func, *args):
-    """
-    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
-    args are passed, they are forwarded to the given function as additional arguments after
-    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
-    parsed data to an integer using base 16.
-
-    Example (compare the last example to the one in L{ParserElement.transformString})::
-        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
-        hex_ints.runTests('''
-            00 11 22 aa FF 0a 0d 1a
-            ''')
-        
-        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
-        OneOrMore(upperword).runTests('''
-            my kingdom for a horse
-            ''')
-
-        wd = Word(alphas).setParseAction(tokenMap(str.title))
-        OneOrMore(wd).setParseAction(' '.join).runTests('''
-            now is the winter of our discontent made glorious summer by this sun of york
-            ''')
-    prints::
-        00 11 22 aa FF 0a 0d 1a
-        [0, 17, 34, 170, 255, 10, 13, 26]
-
-        my kingdom for a horse
-        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
-        now is the winter of our discontent made glorious summer by this sun of york
-        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
-    """
-    def pa(s,l,t):
-        return [func(tokn, *args) for tokn in t]
-
-    try:
-        func_name = getattr(func, '__name__', 
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    pa.__name__ = func_name
-
-    return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
-    
-def _makeTags(tagStr, xml):
-    """Internal helper to construct opening and closing tag expressions, given a tag name"""
-    if isinstance(tagStr,basestring):
-        resname = tagStr
-        tagStr = Keyword(tagStr, caseless=not xml)
-    else:
-        resname = tagStr.name
-
-    tagAttrName = Word(alphas,alphanums+"_-:")
-    if (xml):
-        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
-        openTag = Suppress("<") + tagStr("tag") + \
-                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
-    else:
-        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
-        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
-        openTag = Suppress("<") + tagStr("tag") + \
-                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
-                Optional( Suppress("=") + tagAttrValue ) ))) + \
-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
-    closeTag = Combine(_L("</") + tagStr + ">")
-
-    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
-    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
-    openTag.tag = resname
-    closeTag.tag = resname
-    return openTag, closeTag
-
-def makeHTMLTags(tagStr):
-    """
-    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
-    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
-
-    Example::
-        text = 'More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page'
-        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
-        a,a_end = makeHTMLTags("A")
-        link_expr = a + SkipTo(a_end)("link_text") + a_end
-        
-        for link in link_expr.searchString(text):
-            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
-            print(link.link_text, '->', link.href)
-    prints::
-        pyparsing -> http://pyparsing.wikispaces.com
-    """
-    return _makeTags( tagStr, False )
-
-def makeXMLTags(tagStr):
-    """
-    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
-    tags only in the given upper/lower case.
-
-    Example: similar to L{makeHTMLTags}
-    """
-    return _makeTags( tagStr, True )
-
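The XML variant gets no concrete example above, so a brief sketch (editorial example, pyparsing 2.x; tag match is case-sensitive, unlike the HTML helper):

    from pyparsing import SkipTo, makeXMLTags

    xml = '<book isbn="123"><title>Parsing</title></book>'
    title, title_end = makeXMLTags("title")
    text = title + SkipTo(title_end)("contents") + title_end
    print(text.searchString(xml)[0].contents)   # -> Parsing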
-def withAttribute(*args,**attrDict):
-    """
-    Helper to create a validating parse action to be used with start tags created
-    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
-    with a required attribute value, to avoid false matches on common tags such as
-    C{<TD>} or C{<DIV>}.
-
-    Call C{withAttribute} with a series of attribute names and values. Specify the list
-    of filter attributes names and values as:
-     - keyword arguments, as in C{(align="right")}, or
-     - as an explicit dict with C{**} operator, when an attribute name is also a Python
-          reserved word, as in C{**{"class":"Customer", "align":"right"}}
-     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
-    For attribute names with a namespace prefix, you must use the second form.  Attribute
-    names are matched insensitive to upper/lower case.
-
-    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
-
-    To verify that the attribute exists, but without specifying a value, pass
-    C{withAttribute.ANY_VALUE} as the value.
-
-    Example::
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
-
-        '''
-        div,div_end = makeHTMLTags("div")
-
-        # only match div tag having a type attribute with value "grid"
-        div_grid = div().setParseAction(withAttribute(type="grid"))
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.searchString(html):
-            print(grid_header.body)
-
-        # construct a match with any div tag having a type attribute, regardless of the value
-        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.searchString(html):
-            print(div_header.body)
-    prints::
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    if args:
-        attrs = args[:]
-    else:
-        attrs = attrDict.items()
-    attrs = [(k,v) for k,v in attrs]
-    def pa(s,l,tokens):
-        for attrName,attrValue in attrs:
-            if attrName not in tokens:
-                raise ParseException(s,l,"no matching attribute " + attrName)
-            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
-                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
-                                        (attrName, tokens[attrName], attrValue))
-    return pa
-withAttribute.ANY_VALUE = object()
-
-def withClass(classname, namespace=''):
-    """
-    Simplified version of C{L{withAttribute}} when matching on a div class - made
-    difficult because C{class} is a reserved word in Python.
-
-    Example::
-        html = '''
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
-
-        '''
-        div,div_end = makeHTMLTags("div")
-        div_grid = div().setParseAction(withClass("grid"))
-
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.searchString(html):
-            print(grid_header.body)
-
-        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.searchString(html):
-            print(div_header.body)
-    prints::
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    classattr = "%s:class" % namespace if namespace else "class"
-    return withAttribute(**{classattr : classname})
-
-opAssoc = _Constants()
-opAssoc.LEFT = object()
-opAssoc.RIGHT = object()
-
-def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
-    """
-    Helper method for constructing grammars of expressions made up of
-    operators working in a precedence hierarchy.  Operators may be unary or
-    binary, left- or right-associative.  Parse actions can also be attached
-    to operator expressions. The generated parser will also recognize the use
-    of parentheses to override operator precedences (see example below).
-
-    Note: if you define a deep operator list, you may see performance issues
-    when using infixNotation. See L{ParserElement.enablePackrat} for a
-    mechanism to potentially improve your parser performance.
-
-    Parameters:
-     - baseExpr - expression representing the most basic element for the nested expression
-     - opList - list of tuples, one for each operator precedence level in the
-      expression grammar; each tuple is of the form
-      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
-       - opExpr is the pyparsing expression for the operator;
-          may also be a string, which will be converted to a Literal;
-          if numTerms is 3, opExpr is a tuple of two expressions, for the
-          two operators separating the 3 terms
-       - numTerms is the number of terms for this operator (must
-          be 1, 2, or 3)
-       - rightLeftAssoc is the indicator whether the operator is
-          right or left associative, using the pyparsing-defined
-          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
-       - parseAction is the parse action to be associated with
-          expressions matching this operator expression (the
-          parse action tuple member may be omitted); if the parse action
-          is passed a tuple or list of functions, this is equivalent to
-          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
-     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
-     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
-
-    Example::
-        # simple example of four-function arithmetic with ints and variable names
-        integer = pyparsing_common.signed_integer
-        varname = pyparsing_common.identifier
-
-        arith_expr = infixNotation(integer | varname,
-            [
-            ('-', 1, opAssoc.RIGHT),
-            (oneOf('* /'), 2, opAssoc.LEFT),
-            (oneOf('+ -'), 2, opAssoc.LEFT),
-            ])
-
-        arith_expr.runTests('''
-            5+3*6
-            (5+3)*6
-            -2--11
-            ''', fullDump=False)
-    prints::
-        5+3*6
-        [[5, '+', [3, '*', 6]]]
-
-        (5+3)*6
-        [[[5, '+', 3], '*', 6]]
-
-        -2--11
-        [[['-', 2], '-', ['-', 11]]]
-    """
-    ret = Forward()
-    lastExpr = baseExpr | ( lpar + ret + rpar )
-    for i,operDef in enumerate(opList):
-        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
-        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
-        if arity == 3:
-            if opExpr is None or len(opExpr) != 2:
-                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
-            opExpr1, opExpr2 = opExpr
-        thisExpr = Forward().setName(termName)
-        if rightLeftAssoc == opAssoc.LEFT:
-            if arity == 1:
-                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
-            elif arity == 2:
-                if opExpr is not None:
-                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
-                else:
-                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
-            elif arity == 3:
-                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
-                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
-            else:
-                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
-        elif rightLeftAssoc == opAssoc.RIGHT:
-            if arity == 1:
-                # try to avoid LR with this extra test
-                if not isinstance(opExpr, Optional):
-                    opExpr = Optional(opExpr)
-                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
-            elif arity == 2:
-                if opExpr is not None:
-                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
-                else:
-                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
-            elif arity == 3:
-                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
-                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
-            else:
-                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
-        else:
-            raise ValueError("operator must indicate right or left associativity")
-        if pa:
-            if isinstance(pa, (tuple, list)):
-                matchExpr.setParseAction(*pa)
-            else:
-                matchExpr.setParseAction(pa)
-        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
-        lastExpr = thisExpr
-    ret <<= lastExpr
-    return ret
-
-operatorPrecedence = infixNotation
-"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
-
-dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
-sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
-quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
-                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
-unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
-
-def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
-    """
-    Helper method for defining nested lists enclosed in opening and closing
-    delimiters ("(" and ")" are the default).
-
-    Parameters:
-     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
-     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
-     - content - expression for items within the nested lists (default=C{None})
-     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
-
-    If an expression is not provided for the content argument, the nested
-    expression will capture all whitespace-delimited content between delimiters
-    as a list of separate values.
-
-    Use the C{ignoreExpr} argument to define expressions that may contain
-    opening or closing characters that should not be treated as opening
-    or closing characters for nesting, such as quotedString or a comment
-    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
-    The default is L{quotedString}, but if no expressions are to be ignored,
-    then pass C{None} for this argument.
-
-    Example::
-        data_type = oneOf("void int short long char float double")
-        decl_data_type = Combine(data_type + Optional(Word('*')))
-        ident = Word(alphas+'_', alphanums+'_')
-        number = pyparsing_common.number
-        arg = Group(decl_data_type + ident)
-        LPAR,RPAR = map(Suppress, "()")
-
-        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
-
-        c_function = (decl_data_type("type")
-                      + ident("name")
-                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
-                      + code_body("body"))
-        c_function.ignore(cStyleComment)
-
-        source_code = '''
-            int is_odd(int x) {
-                return (x%2);
-            }
-
-            int dec_to_hex(char hchar) {
-                if (hchar >= '0' && hchar <= '9') {
-                    return (ord(hchar)-ord('0'));
-                } else {
-                    return (10+ord(hchar)-ord('A'));
-                }
-            }
-        '''
-        for func in c_function.searchString(source_code):
-            print("%(name)s (%(type)s) args: %(args)s" % func)
-
-    prints::
-        is_odd (int) args: [['int', 'x']]
-        dec_to_hex (int) args: [['char', 'hchar']]
-    """
-    if opener == closer:
-        raise ValueError("opening and closing strings cannot be the same")
-    if content is None:
-        if isinstance(opener,basestring) and isinstance(closer,basestring):
-            if len(opener) == 1 and len(closer)==1:
-                if ignoreExpr is not None:
-                    content = (Combine(OneOrMore(~ignoreExpr +
-                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-                else:
-                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
-                                ).setParseAction(lambda t:t[0].strip()))
-            else:
-                if ignoreExpr is not None:
-                    content = (Combine(OneOrMore(~ignoreExpr +
-                                    ~Literal(opener) + ~Literal(closer) +
-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-                else:
-                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-        else:
-            raise ValueError("opening and closing arguments must be strings if no content expression is given")
-    ret = Forward()
-    if ignoreExpr is not None:
-        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
-    else:
-        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
-    ret.setName('nested %s%s expression' % (opener,closer))
-    return ret
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True):
-    """
-    Helper method for defining space-delimited indentation blocks, such as
-    those used to define block statements in Python source code.
-
-    Parameters:
-     - blockStatementExpr - expression defining syntax of statement that
-         is repeated within the indented block
-     - indentStack - list created by caller to manage indentation stack
-         (multiple statementWithIndentedBlock expressions within a single grammar
-         should share a common indentStack)
-     - indent - boolean indicating whether block must be indented beyond the
-         current level; set to False for block of left-most statements
-         (default=C{True})
-
-    A valid block must contain at least one C{blockStatement}.
-
-    Example::
-        data = '''
-        def A(z):
-          A1
-          B = 100
-          G = A2
-          A2
-          A3
-        B
-        def BB(a,b,c):
-          BB1
-          def BBA():
-            bba1
-            bba2
-            bba3
-        C
-        D
-        def spam(x,y):
-             def eggs(z):
-                 pass
-        '''
-
-
-        indentStack = [1]
-        stmt = Forward()
-
-        identifier = Word(alphas, alphanums)
-        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
-        func_body = indentedBlock(stmt, indentStack)
-        funcDef = Group( funcDecl + func_body )
-
-        rvalue = Forward()
-        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
-        rvalue << (funcCall | identifier | Word(nums))
-        assignment = Group(identifier + "=" + rvalue)
-        stmt << ( funcDef | assignment | identifier )
-
-        module_body = OneOrMore(stmt)
-
-        parseTree = module_body.parseString(data)
-        parseTree.pprint()
-    prints::
-        [['def',
-          'A',
-          ['(', 'z', ')'],
-          ':',
-          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
-         'B',
-         ['def',
-          'BB',
-          ['(', 'a', 'b', 'c', ')'],
-          ':',
-          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
-         'C',
-         'D',
-         ['def',
-          'spam',
-          ['(', 'x', 'y', ')'],
-          ':',
-          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
-    """
-    def checkPeerIndent(s,l,t):
-        if l >= len(s): return
-        curCol = col(l,s)
-        if curCol != indentStack[-1]:
-            if curCol > indentStack[-1]:
-                raise ParseFatalException(s,l,"illegal nesting")
-            raise ParseException(s,l,"not a peer entry")
-
-    def checkSubIndent(s,l,t):
-        curCol = col(l,s)
-        if curCol > indentStack[-1]:
-            indentStack.append( curCol )
-        else:
-            raise ParseException(s,l,"not a subentry")
-
-    def checkUnindent(s,l,t):
-        if l >= len(s): return
-        curCol = col(l,s)
-        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
-            raise ParseException(s,l,"not an unindent")
-        indentStack.pop()
-
-    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
-    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
-    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
-    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
-    if indent:
-        smExpr = Group( Optional(NL) +
-            #~ FollowedBy(blockStatementExpr) +
-            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
-    else:
-        smExpr = Group( Optional(NL) +
-            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
-    blockStatementExpr.ignore(_bslash + LineEnd())
-    return smExpr.setName('indented block')
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) -commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") -def replaceHTMLEntity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form C{/* ... */}" - -htmlComment = Regex(r"").setName("HTML comment") -"Comment of the form C{}" - -restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") -dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form C{// ... (to end of line)}" - -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") -"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" - -javaStyleComment = cppStyleComment -"Same as C{L{cppStyleComment}}" - -pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form C{# ... (to end of line)}" - -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + - Optional( Word(" \t") + - ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") -commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. 
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
-    """
-    Here are some common low-level expressions that may be useful in jump-starting parser development:
-     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
-     - common L{programming identifiers<identifier>}
-     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
-     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
-     - L{UUID<uuid>}
-     - L{comma-separated list<comma_separated_list>}
-    Parse actions:
-     - C{L{convertToInteger}}
-     - C{L{convertToFloat}}
-     - C{L{convertToDate}}
-     - C{L{convertToDatetime}}
-     - C{L{stripHTMLTags}}
-     - C{L{upcaseTokens}}
-     - C{L{downcaseTokens}}
-
-    Example::
-        pyparsing_common.number.runTests('''
-            # any int or real number, returned as the appropriate type
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.fnumber.runTests('''
-            # any int or real number, returned as float
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.hex_integer.runTests('''
-            # hex numbers
-            100
-            FF
-            ''')
-
-        pyparsing_common.fraction.runTests('''
-            # fractions
-            1/2
-            -3/4
-            ''')
-
-        pyparsing_common.mixed_integer.runTests('''
-            # mixed fractions
-            1
-            1/2
-            -3/4
-            1-3/4
-            ''')
-
-        import uuid
-        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
-        pyparsing_common.uuid.runTests('''
-            # uuid
-            12345678-1234-5678-1234-567812345678
-            ''')
-    prints::
-        # any int or real number, returned as the appropriate type
-        100
-        [100]
-
-        -100
-        [-100]
-
-        +100
-        [100]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # any int or real number, returned as float
-        100
-        [100.0]
-
-        -100
-        [-100.0]
-
-        +100
-        [100.0]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # hex numbers
-        100
-        [256]
-
-        FF
-        [255]
-
-        # fractions
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        # mixed fractions
-        1
-        [1]
-
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        1-3/4
-        [1.75]
-
-        # uuid
-        12345678-1234-5678-1234-567812345678
-        [UUID('12345678-1234-5678-1234-567812345678')]
-    """
-
-    convertToInteger = tokenMap(int)
-    """
-    Parse action for converting parsed integers to Python int
-    """
-
-    convertToFloat = tokenMap(float)
-    """
-    Parse action for converting parsed numbers to Python float
-    """
-
-    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
-    """expression that parses an unsigned integer, returns an int"""
-
-    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
-    """expression that parses a hexadecimal integer, returns an int"""
-
-    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
-    """expression that parses an integer with optional leading sign, returns an int"""
-
-    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
-    """fractional expression of an integer divided by an integer, returns a float"""
-    fraction.addParseAction(lambda t: t[0]/t[-1])
-
-    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
-    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
-    mixed_integer.addParseAction(sum)
-
-    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
-    """expression that parses a floating point number and returns a float"""
float""" - - sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas+'_', alphanums+'_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (C{0.0.0.0 - 255.255.255.255})" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) - - Example:: - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - prints:: - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """ - Helper to create a parse action for converting parsed datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) - - Example:: - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - prints:: - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (C{yyyy-mm-dd})" - - iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """ - Parse action to remove HTML tags from web page HTML source - - Example:: - # strip HTML links from normal text - text = 'More info at the
-            td,td_end = makeHTMLTags("TD")
-            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
-
-            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
-        """
-        return pyparsing_common._html_stripper.transformString(tokens[0])
-
-    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
-                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
-    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
-    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
-    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
-    """Parse action to convert tokens to upper case."""
-
-    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
-    """Parse action to convert tokens to lower case."""
-
-
-if __name__ == "__main__":
-
-    selectToken    = CaselessLiteral("select")
-    fromToken      = CaselessLiteral("from")
-
-    ident          = Word(alphas, alphanums + "_$")
-
-    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
-    columnNameList = Group(delimitedList(columnName)).setName("columns")
-    columnSpec     = ('*' | columnNameList)
-
-    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
-    tableNameList  = Group(delimitedList(tableName)).setName("tables")
-
-    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
-
-    # demo runTests method, including embedded comments in test string
-    simpleSQL.runTests("""
-          # '*' as column list and dotted table name
-          select * from SYS.XYZZY
-
-          # caseless match on "SELECT", and casts back to "select"
-          SELECT * from XYZZY, ABC
-
-          # list of column names, and mixed case SELECT keyword
-          Select AA,BB,CC from Sys.dual
-
-          # multiple tables
-          Select A, B, C from Sys.dual, Table2
-
-          # invalid SELECT keyword - should fail
-          Xelect A, B, C from Sys.dual
-
-          # incomplete command - should fail
-          Select
-
-          # invalid column name - should fail
-          Select ^^^ frox Sys.dual
-
-          """)
-
-    pyparsing_common.number.runTests("""
-          100
-          -100
-          +100
-          3.14159
-          6.02e23
-          1e-12
-          """)
-
-    # any int or real number, returned as float
-    pyparsing_common.fnumber.runTests("""
-          100
-          -100
-          +100
-          3.14159
-          6.02e23
-          1e-12
-          """)
-
-    pyparsing_common.hex_integer.runTests("""
-          100
-          FF
-          """)
-
-    import uuid
-    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
-    pyparsing_common.uuid.runTests("""
-          12345678-1234-5678-1234-567812345678
-          """)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/archive_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/archive_util.py
deleted file mode 100755
index 0f70284..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/archive_util.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""Utilities for extracting common archive formats"""
-
-import zipfile
-import tarfile
-import os
-import shutil
-import posixpath
-import contextlib
-from distutils.errors import DistutilsError
-
-from pkg_resources import ensure_directory
-
-__all__ = [
-    "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
-    "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
-]
-
-
-class UnrecognizedFormat(DistutilsError):
-    """Couldn't recognize the archive type"""
-
-
-def default_filter(src, dst):
-    """The default progress/filter callback; returns the destination unchanged for all files"""
-    return dst
-
-
-def unpack_archive(
-        filename, extract_dir, progress_filter=default_filter,
-        drivers=None):
-    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
-
-    `progress_filter` is a function taking two arguments: a source path
-    internal to the archive ('/'-separated), and a filesystem path where it
-    will be extracted. The callback must return the desired extract path
-    (which may be the same as the one passed in), or else ``None`` to skip
-    that file or directory. The callback can thus be used to report on the
-    progress of the extraction, as well as to filter the items extracted or
-    alter their extraction paths.
-
-    `drivers`, if supplied, must be a non-empty sequence of functions with the
-    same signature as this function (minus the `drivers` argument), that raise
-    ``UnrecognizedFormat`` if they do not support extracting the designated
-    archive type. The `drivers` are tried in sequence until one is found that
-    does not raise an error, or until all are exhausted (in which case
-    ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
-    drivers, the module's ``extraction_drivers`` constant will be used, which
-    means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
-    order.
-    """
-    for driver in drivers or extraction_drivers:
-        try:
-            driver(filename, extract_dir, progress_filter)
-        except UnrecognizedFormat:
-            continue
-        else:
-            return
-    else:
-        raise UnrecognizedFormat(
-            "Not a recognized archive type: %s" % filename
-        )
-
-
-def unpack_directory(filename, extract_dir, progress_filter=default_filter):
-    """"Unpack" a directory, using the same interface as for archives
-
-    Raises ``UnrecognizedFormat`` if `filename` is not a directory
-    """
-    if not os.path.isdir(filename):
-        raise UnrecognizedFormat("%s is not a directory" % filename)
-
-    paths = {
-        filename: ('', extract_dir),
-    }
-    for base, dirs, files in os.walk(filename):
-        src, dst = paths[base]
-        for d in dirs:
-            paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
-        for f in files:
-            target = os.path.join(dst, f)
-            target = progress_filter(src + f, target)
-            if not target:
-                # skip non-files
-                continue
-            ensure_directory(target)
-            f = os.path.join(base, f)
-            shutil.copyfile(f, target)
-            shutil.copystat(f, target)
-
-
-def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
-    """Unpack zip `filename` to `extract_dir`
-
-    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
-    by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
-    of the `progress_filter` argument.
-    """
-
-    if not zipfile.is_zipfile(filename):
-        raise UnrecognizedFormat("%s is not a zip file" % (filename,))
-
-    with zipfile.ZipFile(filename) as z:
-        for info in z.infolist():
-            name = info.filename
-
-            # don't extract absolute paths or ones with .. in them
-            if name.startswith('/') or '..' in name.split('/'):
-                continue
-
-            target = os.path.join(extract_dir, *name.split('/'))
-            target = progress_filter(name, target)
-            if not target:
-                continue
-            if name.endswith('/'):
-                # directory
-                ensure_directory(target)
-            else:
-                # file
-                ensure_directory(target)
-                data = z.read(info.filename)
-                with open(target, 'wb') as f:
-                    f.write(data)
-            unix_attributes = info.external_attr >> 16
-            if unix_attributes:
-                os.chmod(target, unix_attributes)
-
-
-def _resolve_tar_file_or_dir(tar_obj, tar_member_obj):
-    """Resolve any links and extract link targets as normal files."""
-    while tar_member_obj is not None and (
-            tar_member_obj.islnk() or tar_member_obj.issym()):
-        linkpath = tar_member_obj.linkname
-        if tar_member_obj.issym():
-            base = posixpath.dirname(tar_member_obj.name)
-            linkpath = posixpath.join(base, linkpath)
-            linkpath = posixpath.normpath(linkpath)
-        tar_member_obj = tar_obj._getmember(linkpath)
-
-    is_file_or_dir = (
-        tar_member_obj is not None and
-        (tar_member_obj.isfile() or tar_member_obj.isdir())
-    )
-    if is_file_or_dir:
-        return tar_member_obj
-
-    raise LookupError('Got unknown file type')
-
-
-def _iter_open_tar(tar_obj, extract_dir, progress_filter):
-    """Emit member-destination pairs from a tar archive."""
-    # don't do any chowning!
-    tar_obj.chown = lambda *args: None
-
-    with contextlib.closing(tar_obj):
-        for member in tar_obj:
-            name = member.name
-            # don't extract absolute paths or ones with .. in them
-            if name.startswith('/') or '..' in name.split('/'):
-                continue
-
-            prelim_dst = os.path.join(extract_dir, *name.split('/'))
-
-            try:
-                member = _resolve_tar_file_or_dir(tar_obj, member)
-            except LookupError:
-                continue
-
-            final_dst = progress_filter(name, prelim_dst)
-            if not final_dst:
-                continue
-
-            if final_dst.endswith(os.sep):
-                final_dst = final_dst[:-1]
-
-            yield member, final_dst
-
-
-def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
-    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
-
-    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
-    by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
-    of the `progress_filter` argument.
-    """
-    try:
-        tarobj = tarfile.open(filename)
-    except tarfile.TarError as e:
-        raise UnrecognizedFormat(
-            "%s is not a compressed or uncompressed tar file" % (filename,)
-        ) from e
-
-    for member, final_dst in _iter_open_tar(
-            tarobj, extract_dir, progress_filter,
-    ):
-        try:
-            # XXX Ugh
-            tarobj._extract_member(member, final_dst)
-        except tarfile.ExtractError:
-            # chown/chmod/mkfifo/mknode/makedev failed
-            pass
-
-    return True
-
-
-extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/build_meta.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/build_meta.py
deleted file mode 100755
index d0ac613..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/build_meta.py
+++ /dev/null
@@ -1,290 +0,0 @@
-"""A PEP 517 interface to setuptools
-
-Previously, when a user or a command line tool (let's call it a "frontend")
-needed to make a request of setuptools to take a certain action, for
-example, generating a list of installation requirements, the frontend
-would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
-
-PEP 517 defines a different method of interfacing with setuptools. Rather
-than calling "setup.py" directly, the frontend should:
-
-  1. Set the current directory to the directory with a setup.py file
-  2. Import this module into a safe python interpreter (one in which
-     setuptools can potentially set global variables or crash hard).
-  3. Call one of the functions defined in PEP 517.
-
-What each function does is defined in PEP 517. However, here is a "casual"
-definition of the functions (this definition should not be relied on for
-bug reports or API stability):
-
-  - `build_wheel`: build a wheel in the folder and return the basename
-  - `get_requires_for_build_wheel`: get the `setup_requires` to build
-  - `prepare_metadata_for_build_wheel`: get the `install_requires`
-  - `build_sdist`: build an sdist in the folder and return the basename
-  - `get_requires_for_build_sdist`: get the `setup_requires` to build
-
-Again, this is not a formal definition! Just a "taste" of the module.
-"""
-
-import io
-import os
-import sys
-import tokenize
-import shutil
-import contextlib
-import tempfile
-import warnings
-
-import setuptools
-import distutils
-
-from pkg_resources import parse_requirements
-
-__all__ = ['get_requires_for_build_sdist',
-           'get_requires_for_build_wheel',
-           'prepare_metadata_for_build_wheel',
-           'build_wheel',
-           'build_sdist',
-           '__legacy__',
-           'SetupRequirementsError']
-
-
-class SetupRequirementsError(BaseException):
-    def __init__(self, specifiers):
-        self.specifiers = specifiers
-
-
-class Distribution(setuptools.dist.Distribution):
-    def fetch_build_eggs(self, specifiers):
-        specifier_list = list(map(str, parse_requirements(specifiers)))
-
-        raise SetupRequirementsError(specifier_list)
-
-    @classmethod
-    @contextlib.contextmanager
-    def patch(cls):
-        """
-        Replace
-        distutils.dist.Distribution with this class
-        for the duration of this context.
-        """
-        orig = distutils.core.Distribution
-        distutils.core.Distribution = cls
-        try:
-            yield
-        finally:
-            distutils.core.Distribution = orig
-
-
-@contextlib.contextmanager
-def no_install_setup_requires():
-    """Temporarily disable installing setup_requires
-
-    Under PEP 517, the backend reports build dependencies to the frontend,
-    and the frontend is responsible for ensuring they're installed.
-    So setuptools (acting as a backend) should not try to install them.
-    """
-    orig = setuptools._install_setup_requires
-    setuptools._install_setup_requires = lambda attrs: None
-    try:
-        yield
-    finally:
-        setuptools._install_setup_requires = orig
-
-
-def _get_immediate_subdirectories(a_dir):
-    return [name for name in os.listdir(a_dir)
-            if os.path.isdir(os.path.join(a_dir, name))]
-
-
-def _file_with_extension(directory, extension):
-    matching = (
-        f for f in os.listdir(directory)
-        if f.endswith(extension)
-    )
-    try:
-        file, = matching
-    except ValueError:
-        raise ValueError(
-            'No distribution was found. Ensure that `setup.py` '
-            'is not empty and that it calls `setup()`.')
-    return file
-
-
-def _open_setup_script(setup_script):
-    if not os.path.exists(setup_script):
-        # Supply a default setup.py
-        return io.StringIO(u"from setuptools import setup; setup()")
-
-    return getattr(tokenize, 'open', open)(setup_script)
-
-
-@contextlib.contextmanager
-def suppress_known_deprecation():
-    with warnings.catch_warnings():
-        warnings.filterwarnings('ignore', 'setup.py install is deprecated')
-        yield
-
-
-class _BuildMetaBackend(object):
-
-    def _fix_config(self, config_settings):
-        config_settings = config_settings or {}
-        config_settings.setdefault('--global-option', [])
-        return config_settings
-
-    def _get_build_requires(self, config_settings, requirements):
-        config_settings = self._fix_config(config_settings)
-
-        sys.argv = sys.argv[:1] + ['egg_info'] + \
-            config_settings["--global-option"]
-        try:
-            with Distribution.patch():
-                self.run_setup()
-        except SetupRequirementsError as e:
-            requirements += e.specifiers
-
-        return requirements
-
-    def run_setup(self, setup_script='setup.py'):
-        # Note that we can reuse our build directory between calls
-        # Correctness comes first, then optimization later
-        __file__ = setup_script
-        __name__ = '__main__'
-
-        with _open_setup_script(__file__) as f:
-            code = f.read().replace(r'\r\n', r'\n')
-
-        exec(compile(code, __file__, 'exec'), locals())
-
-    def get_requires_for_build_wheel(self, config_settings=None):
-        config_settings = self._fix_config(config_settings)
-        return self._get_build_requires(
-            config_settings, requirements=['wheel'])
-
-    def get_requires_for_build_sdist(self, config_settings=None):
-        config_settings = self._fix_config(config_settings)
-        return self._get_build_requires(config_settings, requirements=[])
-
-    def prepare_metadata_for_build_wheel(self, metadata_directory,
-                                         config_settings=None):
-        sys.argv = sys.argv[:1] + [
-            'dist_info', '--egg-base', metadata_directory]
-        with no_install_setup_requires():
-            self.run_setup()
-
-        dist_info_directory = metadata_directory
-        while True:
-            dist_infos = [f for f in os.listdir(dist_info_directory)
-                          if f.endswith('.dist-info')]
-
-            if (
-                len(dist_infos) == 0 and
-                len(_get_immediate_subdirectories(dist_info_directory)) == 1
-            ):
-
-                dist_info_directory = os.path.join(
-                    dist_info_directory, os.listdir(dist_info_directory)[0])
-                continue
-
-            assert len(dist_infos) == 1
-            break
-
-        # PEP 517 requires that the .dist-info directory be placed in the
-        # metadata_directory. To comply, we MUST copy the directory to the root
-        if dist_info_directory != metadata_directory:
-            shutil.move(
-                os.path.join(dist_info_directory, dist_infos[0]),
-                metadata_directory)
-            shutil.rmtree(dist_info_directory, ignore_errors=True)
-
-        return dist_infos[0]
-
-    def _build_with_temp_dir(self, setup_command, result_extension,
-                             result_directory, config_settings):
-        config_settings = self._fix_config(config_settings)
-        result_directory = os.path.abspath(result_directory)
-
-        # Build in a temporary directory, then copy to the target.
- os.makedirs(result_directory, exist_ok=True) - with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir: - sys.argv = (sys.argv[:1] + setup_command + - ['--dist-dir', tmp_dist_dir] + - config_settings["--global-option"]) - with no_install_setup_requires(): - self.run_setup() - - result_basename = _file_with_extension( - tmp_dist_dir, result_extension) - result_path = os.path.join(result_directory, result_basename) - if os.path.exists(result_path): - # os.rename will fail overwriting on non-Unix. - os.remove(result_path) - os.rename(os.path.join(tmp_dist_dir, result_basename), result_path) - - return result_basename - - def build_wheel(self, wheel_directory, config_settings=None, - metadata_directory=None): - with suppress_known_deprecation(): - return self._build_with_temp_dir(['bdist_wheel'], '.whl', - wheel_directory, config_settings) - - def build_sdist(self, sdist_directory, config_settings=None): - return self._build_with_temp_dir(['sdist', '--formats', 'gztar'], - '.tar.gz', sdist_directory, - config_settings) - - -class _BuildMetaLegacyBackend(_BuildMetaBackend): - """Compatibility backend for setuptools - - This is a version of setuptools.build_meta that endeavors - to maintain backwards - compatibility with pre-PEP 517 modes of invocation. It - exists as a temporary - bridge between the old packaging mechanism and the new - packaging mechanism, - and will eventually be removed. - """ - def run_setup(self, setup_script='setup.py'): - # In order to maintain compatibility with scripts assuming that - # the setup.py script is in a directory on the PYTHONPATH, inject - # '' into sys.path. (pypa/setuptools#1642) - sys_path = list(sys.path) # Save the original path - - script_dir = os.path.dirname(os.path.abspath(setup_script)) - if script_dir not in sys.path: - sys.path.insert(0, script_dir) - - # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to - # get the directory of the source code. They expect it to refer to the - # setup.py script. - sys_argv_0 = sys.argv[0] - sys.argv[0] = setup_script - - try: - super(_BuildMetaLegacyBackend, - self).run_setup(setup_script=setup_script) - finally: - # While PEP 517 frontends should be calling each hook in a fresh - # subprocess according to the standard (and thus it should not be - # strictly necessary to restore the old sys.path), we'll restore - # the original path so that the path manipulation does not persist - # within the hook after run_setup is called. 
-            sys.path[:] = sys_path
-            sys.argv[0] = sys_argv_0
-
-
-# The primary backend
-_BACKEND = _BuildMetaBackend()
-
-get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
-get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
-prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
-build_wheel = _BACKEND.build_wheel
-build_sdist = _BACKEND.build_sdist
-
-
-# The legacy backend
-__legacy__ = _BuildMetaLegacyBackend()
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-32.exe b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-32.exe
deleted file mode 100644
index b1487b7819e7286577a043c7726fbe0ca1543083..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 65536
[65536-byte base85 literal omitted]
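For orientation, the module deleted above is the vendored copy of setuptools' PEP 517 backend, `setuptools.build_meta`, and its hooks are meant to be driven by a build frontend. Below is a minimal sketch of that calling sequence, assuming the current working directory contains the project's setup.py; the output paths are arbitrary examples, not anything prescribed by this patch.

import os
import setuptools.build_meta as backend

os.makedirs('/tmp/meta', exist_ok=True)   # metadata_directory (example path)
os.makedirs('/tmp/dist', exist_ok=True)   # wheel/sdist target (example path)

# Ask the backend what it needs, then build; each build_* hook returns
# the basename of the artifact it wrote into the given directory.
print(backend.get_requires_for_build_wheel())                   # e.g. ['wheel']
dist_info = backend.prepare_metadata_for_build_wheel('/tmp/meta')
wheel_name = backend.build_wheel('/tmp/dist')
sdist_name = backend.build_sdist('/tmp/dist')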
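The `_build_with_temp_dir` helper removed above encodes a pattern worth noting: the distribution is built into a fresh temporary directory created inside the target directory, and only the finished artifact is renamed into place. A condensed sketch of the same pattern follows; the names `publish_atomically` and `build` are hypothetical stand-ins for the backend's internals.

import os
import tempfile

def publish_atomically(build, result_directory, result_name):
    os.makedirs(result_directory, exist_ok=True)
    with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dir:
        build(tmp_dir)  # expected to write result_name into tmp_dir
        result_path = os.path.join(result_directory, result_name)
        if os.path.exists(result_path):
            os.remove(result_path)  # os.rename will not overwrite on Windows
        os.rename(os.path.join(tmp_dir, result_name), result_path)
    return result_name

Creating the temporary directory inside `result_directory` keeps the final `os.rename` on a single filesystem, so a reader of the target directory never observes a half-written wheel or sdist under its final name.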
z#b1Zbp`VPADB?+6d4_+|PVRo+k#0QiPsT~)ucpF^-~N%s&+_Cfjr9Hxzk4$Nw)lss zmkZ@sGN!|sN4^W6LqL8q7E^(*12QhY4?GLJ27C+*reTtRg@9a?3CEd$=sSM?C)~1m4*&oF diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-64.exe b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-64.exe deleted file mode 100644 index 675e6bf3743f3d3011c238657e7128ee9960ef7f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 74752 zcmeFad3;nw);Hdr?j}u==7yyqfJg%kqCtqpC80t4LPu^(N8=-ER75n&prFR&UceDB z@phavWskhi=#1m|%%F}lj?UsZGsvQt5JTodne9_xygp zKi+>{KBRBmT2Gxib?VePr|Op8w9@9V*=$byS(eSV22c7I6uw4&mnWJ z$MZk#s+do8oC$GRiOqJ$BTifH-`O?kw07GVTXsfYo9!LM+%035U*jm2#J3_n{DpIsylAeZ?oA}or@^cX*&;p@8Yl5zaYqC zqReLd_+ljZfRn*^ItAvsb0S~E#7db_^bvivWg&Uk_wpg@|NZxW0s~rXw%@JA7W#9w znC{QhVoUu#b(VUadc9_T;ft^jG;@np*brtX*3qDS^H;5NPdwDuuEig)w2D?9%(2-D zI|{#yRD9iR8?D95?Ge^qXDz=|8CgU9QI*v>6KammHk?*-@|>EZqYYnO$MQiT*8IwB zjcsG6_)Vxma~#U=Xm-rjtfpi}VFwC1Cur7YyoLi`)=#&Vu0f#zy$X$$g*3L%uW3y8 zmuYONzr5Kox_P?Yrm@-nV3;*)<|dyyN4-Uz-LyUZkNTT;gI4>+ToAv;T(1p4{=!XK zEb1>4F$Xl(sI2a*v18FK`oNW%)lhSElHqI)TC-QUqg#xxw0P7X1TG@+NBu#}xJW$Y z4{GsQ{sQzzi-r6?etCazhNb=jn^N~z-~hqkY$f^}g8yCNU9xZn3QMGGaTEl`MFX9C zG^k^_1rR8RtYQ(Z&ZG}fxIF8)$B1zR-ss6<%dcHRYkqOqs_HH5(0O@!H7 z(-{Bn=}Th=WLG2XbB!I3m$?Ojp&R@&FvUVkV@K53GMlm?8)Q{d_^}qtLZgkr!HyQY z(XX%piOS;*!3)0(v9>){ouv_)(%i?U zS|zq{MF|F?IUKvFnF@^q@cbE|2r&0wnTB_zh%nk~0w9tZmW7^zXwRVMAE05(%JFqu zi~-E^@F=^jZj0_N+-rF+c@HZ$%}o5%#{9y) zvDf^>h&rSL^*gD7~pzOHv=pn zZpOX|VMKkAilc(3scUTLaN!oqd+b0OM&e5aa-zmVIg^N-3ba7uqC91!t)^(Ao-0Z= zBRe=&VB_K>f*4`+Pn0a&i?Yl$8QqaZV>2w}Ro8`hpBI~vsjPOLi(vhXzC8J=&Bped zU6wJL|AUwqsICB*_!{IcXlEQCj!$@Y{fyvVRn1*ukl8i(qo?7gm{xW32isz5Se(%>1j-a2k4wb|wT)GbP)~3cw z?6fpLj~Sq`9YkM)yDZB*We>-k{xAm5y?nH0Ho2{x^Hypsn|E~r0<*jx=2YhD6NHvl9yo4U5tiyIlU>#Dq@mTY2oce0 zScIx+t*YHbRIT2s&bjqw$p*oU67G{!71sDN2sxTN5)0-oL1Aw=ob$3lFj* ztVs)OQ=VuDG#Tgc$T*v=MF_RTL4A^~749wE!fzjIvze_{!i$bjkvG#thW==gNvR?q zqN9=c9sWvw6oprI%*YEWbx$CY=-}BgsJF|~&ojGDfwn3zlecP(M_rM)Yu~wcoB82L zZNc91uwxJ?*>iE0-InZ+zyt&|243NM1(`ag6+L8(rCNqjEnXsf)~Gdhxy%nxd<%-_ zG<2v%HTr0NH-P%#9@h8)$xbV9#5j)t>pPHUVJX`#82c>$e2P5Fi^z73?Zb3>4H-a4 zyZAo{B_wtgf!oXxBcR1yzjoPeO~Gr4i!#^3fZeu!5V{O<&s;;BtE4N?q(qtks-WJO zD~v3>0nlkN*NA*{4_W;X4Io~{Mogf@=VYQSm6*9^7%EIIDcl0W%13KjY>-_uHx_7S zBM3Ta*CEci_MQineL{VRdq*QvNnCS;!G7c3CFAYj=nW|}g_(0Bp(?@#*~8{BOV7sd zDcx0Cx7X;?l5q+PV%P#V+gK1b6L#Y@;%u9I)LB}a`E+cYYNlR9TO8fRcYr1|=D8ki zBiH!EGQ4k>xDX4mXDLK0EpVV}G7x2RQ+WU4iC8DJH7~s={+*}g@6kFx*BXyG1VJP& zk4O6F@~-nB`>b1#rzEqq_{;*!TY-&T3J_Vpd32D*-d(1cjk$bl@7z}+_r*QACEP&D zVFxw8wdzuUVu0Idf!4+O%DVgW6fJ*iFL*i=X9BYTeFhw6BWnKWO#ufj;l&UybT5BxG@`(Cv-v9sK`sc!KoDR) z67}ijJN2A5PZ=2nO;9zBVYAC!b*-{`Z+NXe^)IaaZ4aV@RcC9R2h0yL^*)jOMlF^L z;kuNyhRwFi!;OhPMzMU!#EV1kKX2Z=l`FMaf1;|ewZ-_h6!2u#_t&h(u+?gGG$|v4 zHp+zm;o76Nvuw8N0?Hq|1`@?JxhMxg>6-ocYeRWFIR4u4*JbQaJ`RvWfLCeik3W>a zk1T?~etHvy@Z|K;PCs47?)I7-zb!EfMA;h!J^hcc1Etvwx*tQ>u`yF0zXD5Ky|cd( z{fLlbZ3N_cCQ^(~lR075)TG6n=-@`+HY03uch$J?TI-bfw>;v2tg<_7eq)su?g_88 zNnF;J*6q=^gv|!G5@o0}RXt%pRsE9a$MydHx{-RlOKar0BA0%9D(ZTf#|5d^vE5aSOvMb88FJ;TQa6RBDfP#(RV&1fQVf4>e zHMI8t#jeT2Ao(bv`ZIKiLhh=*sWGP#4Q@o)t1`u?Cy!7I+f(zogymtrMc5YA{HROq zusI`ak3LXkL3e3InX_|$#IXlFE;43MxT5JwHYitP({q{T)*Lh49jZgobClJp!)$BU zo+LyUZVj_7g1QsGhU6pWQYllhRv}>zkD+^~3H)*$Bbgb}+xSQ<;`f1gBW$Av`I&Dx z2crSD+_YWn2O`LmcO5N%w9$t&Xnp}X^Y{K2FlZ61txwY6v7?X$3-^|?qikzzmcLR9 z9MiKRfo}{Y64I#&Td&*J2qF z@)G(Q#-?r8cnF+(wfKYfq?__O)cV01?J&R5P~i~$PTG?FQe*<`E(kHnAuAkHCh49j zv-Q4HCK^~TjwGF0d;#q(iv}9Iw7}>3qzEuDHUfz%e^;dVQPET7kr#V6y^GJ1O|z5K 
z{rz&yQ>-+o*Qj~f`Y)1wJPP=zto`(O_c+d~X&?b&u@>T$Hwa+8ohfe`jRR6=Jutk# z2UUyp)@yz_^(f&jRMl;9bEzH8gQ_E@fIUNdI}mPsEG9pyhtRtYy|v}D1J$(_V-z?f z^Stg|&Dn-%G&FeCCdvQs532AeG3Kh3adWH7E2dYK))&_m%8v20#YTnNa^!U2_PaIR zDRqz49;Mc4U#l%L`;I*?SW&;YsG?qLY@kA*@rKHmNu3l|mtAgi_`N;oWwRy(o2@xp zFToU}#o}$yJdaD=rSq9pVG(nMj%~MfYWXKU-f8M^$#f_mY^aj>(}I7sNwyWI5bx~rdcYB7S+#aj737w_&5pVjTK7?tP{0p@5h1DR{$HE_ydz8)8 zJr@0{uL3)tnqE`aP+>Rk>n+Z(`!27#tw(9j4H|)5A^}-w*7M z;tF)}NFLHPiC+p2%L@7t|4}^RkGT&W&TGF3~yQG`D72wkE-N7P}%-tWCWAJ$j@qv8Lv@&B{<{Abhe9lrN_ z@BIJ${?DL5@=5Gf%JHZyU`v%pWdZj;3!{H& zy8qi*VvIFkaKyyv;b$EKe95(ouN`F*^;hp$j-UV1g3Ir0`&wL{rHvY{C;X;gy#5Qf z_4%;B%MV&!9veRVEyH{5@EZufYwi1Mk5M12HP>QEqSvo0{iQ$GG0sCEIq&t0Uw5lZ zUcc=1@x4Mbp1-u`?Y1wJ8n@Jn`T0Rhj^dbcrv#qfE5`rSIO93x(0N-gG}OQPyU^ip z(V}Slk@4^N+M;ix!~Py?!QI&wEV9cTO*{IoY`zrXwkIt_wvyjGOgu@PsLV9Reis={ zeh0p=zDLF468qimq|_MuU1T!(9XMcx7nxIjyY2Tu)~i}$zl+Q(zbgAZ!+KR7`yF)< z{d3yyY-#G>?)_H!B5TTTz5PDIdQ~g!ceaD{&uzcE?RRsZ6@Qfd-m%wuKh}OPvfpLz zM1CIoorOjH%eLRIvfthIyKcnzrQ7dOVms~koLjAY{<|Q}SeA$M( zZTOrGci8YL8@Af;aT{*5;R7~YW5XM5xY~x%^qcJWB{no{SY^W!8y4BnW5XO9PPE|| z8z$RO*{~lIxM-Ub!bjWVSgRVk{(9_oT{F$1(?1HA*}rIiAvj2$QCx&SqHSD|Xk>yW z-#Y$c^#et-i^coD{44VPWAWQ;dblT8^yu9`^?sLeMSf8zZfWzmJm2M!_WBc^hk0J+ z`74iXYi9Gz^E|}!63=Hm$%H+Xr;tai2mfFA{XOmSm|nkF z`xh;HP9LkDvTZoVhHe}7bJ-6m2BTBH%kbf^!@2 zO4j>K@dvKr5&T8(<&;y{!^52obkIp=MV90iKWb-I9I| zH4iwIPUAxSJ-}1YwQR(l4Xor5`UHSCodIt6-vS(dCS@UR6>uew;3IIo?H2fF9?7=@ zc%jG2OW->^PZ7QiSmCwYRlp7&%~!xvrYZHN-~epnd0)Zk{A`fR1v;J+St&~KGX<)h!n(<=VJ z$9aSf0{hHhEX3alyp>1Nza6-&P^mq*8-Y`1!t=NVKF1?GBXIh8$WdIIYKuyFg zu$)I|DDZ8DA1R~zeCnM?%D4#l2~RoU6X!BF;gRqYfq&wWtC&n+%{;4I02~2Nx>!wWI?~x`eT!KkXejn@94({(`!hN7B3n__GqF zG6}N=_y~`L*$C|55!z~4YPrV%FSgxnz)|zz3F2k~&*oWz+Yc<~k#wqnr+GG`!6D)47K!jo%&gBKD8|8(HOYoG(}MZmk3Qcm3W z0)M{@y5nvIUe!ohl4$S1tPpjC`($ACN_Y-;4KSt|TH}rb)`n>pxC6j1cy7n-`yuV< zN6-y-HgFM-v`2wSH(373z@PFwM3~!wSNzy=8^8~2_sW~-D{i)Uzzv-H6WS8t=K=5G zk-EDVxaOzS3;qH-c!X90Pruc2`+y(t#KBi4@Uov#*SKqdxARDNf%ERL@)8)hllDaz zfxqUFyw(FBUjtv^FYuJLv{~Ak2ly$EwB-)q?Z2SRgc0aoXQeN28_!DoJAjG5hF5S4 zyoBcf?h@b!cfnUK+V$PYS@&4!7Xk0#5j^h&e#mn&VNBrYdo8}r1a9S#w!Z`T)o-XT z!h8*^xgXxZE%53Gs4v`2z=i(-KDZYFXKkP##9a)0i%06Q4Y>Ca%Y6X2{&(O^7=c3` zxA-j`IN%9uyz>En!XtRz0vxgxJ|=uRaMd=(Al$2gt9HU;;JF&Oco%I1_Yz>rZi@#} zfj7NqkEg)wmuc^W5x9*eLe21O%HjB>5f25z`2}oT4@X66diVP3lzO`aSL2#yRQS@X}bkJXuDg1qPH#K1&WTg;3iP?pT%FG=+TP5K+(+< nw?NT@6}Ldqah31_e`34u06t>71&U6lgcmsMed+*O$?yLG6?YM| diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-arm64.exe b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-arm64.exe deleted file mode 100644 index 7a87ce48093d2c984b2ceb7b1f8e1ba6f5fc94f1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 137216 zcmeFaeSBQib??2;j3l4Yi(mBOH(oTdAsLNbDN37K!o9gNE-AJHP5LxNvZ0kMlGqr71~9tc z-#&Asu`CmM+xt9!JXfF3=$y09-fOSD_S$Q$z4qGseB=}NnV>Nyhu_GEF;8;UU(Ei0 z=YOta%;fT)Og4K$&s03=T>nf(W4vWU_zSmw{-x6Ni{JGD4{4{d3`# z&xNo4*sAa?pTFtTR}>ZHUy=dseE-`|U-LhQzvKT_{_xEQujP5=56?V!$X?%iaJjvv zAN;V*m;TO7u5&wbzf;F`#*fZC_!)bB>%m5Qee=QVxt{pZTMu4suW#6EL(AH@>NsDE z{@@y8ZtBc6AO7n88FoaSqZU#sn0X!#BUvQVOH|^+o`{E zOtnQc^QPS~cX+6d&uX8qxb4$-+{WE^Z=sM7Qrk1_@C$^EX}aRpo0@NHHs+tdPAV`> zg6pkZv;JbR(G^}c6PwM8sZ^ZL^^J4#bzkuXFQaWEZ8e4D%Q+iS3-8uX-~9Qt#62^cA3Qn9q`QNwpZ81Y-HA!DwBDas zd*#9Mz_g)F#xzncseI+(;B$S!sh#vhsmh$W%SoN#9jv+i%w2V0f6?m{gaw_o3 z&D5bfU@6a;yZ$zHCP;eby~Sycm(SLHt&Uv|N1Y3IeKf!? 
z$ZvJKIr+mp)99Gb{5fnnHPo`E+6{%a7^nRC==9R}`x-MZ$CR*$JQZT<4gYvG>{UvdU$Z8Ov`9Rjxi}4!KVL7Sn33kMUF5 zI8=<=-QKX;xI@Ps)42StYqnM9nv=2JJFoxyk&(uwQ)6529v;~{QMqh5^+mB)c0qHu z&Kw#Dx(AmU_h76wasBeQKDCxSd+{lho-?I;q#!22$zu!PJ`@a#CmR%7tf~ z6RrKyeaWvRQqaCNFV5=+NSE&Oo}d9S9-GRiImMqTREs-JcIpMvSr zENGmAZ6+|ZKi@R&cg&s&)1F=(HYaIY=bV6PTw|s#4u?mIn!@{v));qj*l=%jZx+un zu^O(HFMN38ucBWt*EH(7Hpetpgxk|gjdOB;&@|Q-#$IWruiCga(dXgt3)f%5uOil- zu4rmcH-&dze-po@CU9~NJg_Ek##iJiluX^{K7q3OfWv|_3`w3c5u+yrgIMGr&U3YRo~VMCe;do+G)b@iIJ z%B#=E_oC~u41GoCiW3hFi@rfWf{k>m|CLcwo5V#J1tJJU4~S^}61~ zRrGoodQE}W;D1Oo7OkP>A<{srP;`S=a6LfsZ{~SRFJWi!x+nDtmUOD zOf20WH0e|D`l~^w^K>xK`C2g8d6Zw?$jAeI!IIAYU@6aKoyUUHJ6AD&Z(#iX5##sc zjNeZ(e*X*O*D!ts0|V15GbIL(w;w+Jw)gzzh7VKcHPn4II4lE~rO1T36u4QrF@DTs zKL>JRcdRKiRWAWQmSdLe&Wx3ZFNYWw+(2^hniO zaB2pp3fF^M0l)MIorB-Yaez10?lr?BReA98Ze#Wc_oZ(LzB$=J*=+nRE|c!DZJq;s zPjQQyS8_PwIMDMZHYf~TF1`L=HYS$(q$dE=-1 z{nBjvC7kJ(E4_XxG!5TFZU|4+`@I~G=A(T?dmk>1RWBX!@KqlTg6EXUMTG_VA$L+< zZcdPj)fcs~bNa#Jd2@C@WZ`?a`oRRfexR(^4_9x?#$B`=?T1YNTHaAVpj)!@9?k9t zaDF&LW4|BNCZ8YFriaO|K4>2v>w|w8*9R-}vD-GhZ|KCr`*z57Tb8l6rvCi4t@HC@ zhib9IY6DY;c0-r!euF*j-)h=*^GwsY>|!%D9$!H`y>3$DgU2c591-iuT6Lg?;P;S{r?`H^Wou-zke^i*Pf5g8G4?#zuqmq z|D*kt>4)>R`(3y9KiHqv_C?-eR~*Xbea2zqYIwc^nU&r=Y>JHP+qb8b52K_Ok$#_XuIt)`j_bhP;FiVGj%jZ&PVMtyu5VAO8`k(f zwd$`^r?DfBP6?!5oDod@Yk5xURb=WZ*$vqL1L%I-0qG$6;b_KgcoDnd%4uf%X>|Rm zRsX}<@&RCOLl>P|^*3Ylgu8+%*$?qRcg-MWhk+RfpU6es)xcd8c1jL9PW$2cd1iZg z&~&{<9xCcwi0#runNncR;5R~D?UZY}xV!o-?z<^_`}FSW`-7&cFIX0RowOM5XYjo5 zR;RU@@xDFJOnV)fy6;}o>d-fdfVoxaBLUO;`iz~k%N^6Yw|M95?O%0DUZw2OU}*&F(cwktN44jftadz|X@_Xjksevp7o63ZzAJl<^mmy#(%JMp4ShN+JS==a|dk>kY6x6=#S^~%*r@vUpzCiNM)ZHS2ma_yAfRa zf-^eX@=VP{Z5C@=zRaF)-j$Qu|K`X8BlpZc1V2CP7RA!EEBXdJ^=2>-4TRep`qu`M zlB44BfwfQ74Dy`KZv#BnI_^*c(pO-1B4 zH50+=$ZcUySr}{2+qs^b{qJ zlBa^PrSvk#l!E`^{i_P`{?n`>YTlBmw>^+CT4;;a%WC+`}(c%vu=ghk9 z&YW2vxf6I6Z^xNz@s_RL2b^C`WqiPcwoMh2ll4w4Jw*EQP^>HNm~{R8?wXd6>59)c zHT|nhvTuEUax%Dy*Pg)6Xn{ut7@Hwr+zZdR!r`Z;wGJHePnK7QpkoVde{t>POLc_41H8B)&z~(U*C&H-G4nOg~1>yijd<8hYeFgJa+^iqqQZ$r+4`Q(QGh+M)Y2 zp6}+`Prlb^^E1%v4;`~teWUh24qmq3nkOeE1D|02qj!HHc{d~0B^fpxC@FB;V%wM~ zB+ulS?UG}XB~zhe)m)PdBh%pb)*|rTi+^kbV`bOZW5X`*b>IGVGrWzqig$vfjgVJq z%6|jp8M`+BThyJMpK+Fch<@oueyG0dXGs>eH~g`Wvoi&pr+0omgY%u>+-v2>^t0qg z#F>7uC}gI=qpi^xRsMyn(X!-o5%A z@F*!*2p)T((GB1+c*G<}N=#SnS4?VPExh^_Ck1W|wOg2#e(5zTiE&&6Ezzi**g6pCeAbS0`h$k9oFZ+KW}*)|u<2 zX-s9qEbw9W6ecHUU`ihMmBhM`lgYL>Mjm+cuJOJ%PhbB&y?^}7$OA5V|0U}qD8WzM zzEwU0hH_;^ZMoaA8ykErqUg|<_M}J9u z8^G7I-(u+we5gA!c-13sHXs*&hb$CK_k(qIoQU`9fI<5YHw9{+|7=$IUVIea%9M|T zQxoO)L*K37AbYbA8C(Lt<#EN{?84^2M^)0f4n9<$e+pRY7woDkbM`d?Dw6Y{&$pV3Ye;aU~wll`H6e$BFO^AYyFwky>WENiZh|!6ms1Kw>E5# zCStNS-t>E$qyg^Z1bQni;c}$L6%uh?eo7StPy1_-fotTiez5 z$g~0QFwK!<1o*pvZJ&E0$xC=v9<`&0`jzgIPcp8O_xZ??{7RFG^2U ze4I|L@?^m-VEhy95`O;p=v=;~0h{1iY!U3Y*v4`vf4zf$__7bix}rUoZ7JwXG}I$+ z%AJDsb=Vp8^T~6$+loK5vA~25R+1-D8QbVOp)L8%CRtg;eQx{V%PQT6IvIznox-6Q zeYCtN*40*5X>E#n?4*zrD$w&i*cjLCHp$b_khUCNM<1%K`LwnEQtT-+yjaij%c(sq z!ItV-a#>gLpPhGQ$vKVd1<`>9@#KN&ulpAn@G_$)?bWVd0g|PKSxdpca6a?{r5LKi|3jGJ9lKm z@_7dT=>xn!*;lgj6aE;n^{f(K{nYA$SE(n#xW|6Eu^bz=j(+Qq{HSn}BU4N|bmoQL zKJ;~1Iu{*CTb`|Soa6xgf=$;e-8aD44ShLwq|$Mpwq^TGa*+2bYs+Qc_uWI^`gwZJ z&NDbS)`deYIZzTy=Whx6

x5%&be+vRm^Y>egOSbwaL4{e zPVevV`BQ%A1rNV)`FG|SvoLd2AFF>wb6r_rw^x-nb(L23M(UiThj01wULIrY`%RUj z^LU@GE_0^MIqip~i{aT$*O)ZCXLWPPY5P%#=WKcl4s(6@-?a8I&-LNE@(1*f`3dYZ z4qG<6eA2JHhuVk6z7V^g3nv$+In_H@3k~ zl|eHz9#ES@>o-wPd^Y#fOlt-AEpuXB^#L<8;6gv{k0oz=?mv>n`L6(aXh|8Z!YzveP&1J44JiPT$Pez=#o;q94 zeT$kWtmm=+oq7~c(1$G>gC61qKTg1JpK)a0j@d4m{9S$cJhJmchZAlk_KElpKeEqX z=gjBlif=u+@H*>K1D}bdhU9a)IF=r!Uxyge`Od6^9rUYYYG`9@>mp~?mca#eHTBcX zp;xaHkD1m_IEfwZq*&L1Lgv>_XkCwM8i%(yDV5t9aMv%LYGur07xX(xWb0!;Zm8PQ zn-}X^y(}_x53vTl`QH5;-51BY269bR?_~G~`zlPo9gD`&bCyN6^al$%C7V7pJn}$O z?(h=H!duWUF72~ zPT5JScMi{|kV9cxS4roE)T8vH?^1cL`?hF*Jzk3q%=3rs;ze5k^cG2Ekj}CK12lSoj^Az$~ zw9RuaxV6dZ&-Ml;0#$!WnZMxoXv4fM9h7TZ3N2}S2jz6nT3zGdv{+XMKhZ^V_Q67) z(W!cF=b1UM%WqyEc|h$GEYU!4b_PQ0j|W51W300%pJex&b7SeVV-dXhkjA&ho6qmE zJF;!5N!0(nth52{-gH}GWyldpC5CZ14(p6w440Cogm-VzC81|#`yg?X0I)`DUkH@Ft^wmh92Ou zh&kBd3xMrRMgMWH`I(y= zfs1J3oV)T*C)>kuW< zGoHO5o;mV~k98eECabJzSI6+wK0?T6)8OTGOfzwdcU>h)sDyDWnEti=5)S@K08Lt#ZmN`_y~i6b&+%Nu{;9d*gM= zy%x-so!NNRMe>!kH@Jp15%{5-dleilh7|8R@V`HiZ<4{F*%Jp2vIIY3uoIu+ z$)@7QH;y8kBc#ixUwL77^>Xc`Q3BF@d{8}*aoXil-taKkjSr;6W}1+i3{V@?iY zWAEfTz*RP79ri}zJ#{JUpOeRJn)hxs@0zFR-O>r)ZJMli%O-rcyT~*$*FXFkeXe#V z7z>s7#941iZpLSzwDLjLY@{cl-_9a)QaYG19hE#^&-nd5{qOUF_@D_sSO!eR0EtKY z(526j=Q&_%-ba}|*Yl%|H66fL-80G0dfZ|4O<&NBz85}d2bW&f<$CD{>1c}!d?9`i zU+mJAe%!?Mb$r3H^?yknDl@=3(Sz_tJFyP*^Pa?H))|BFFtG-e@P?1uUxL?Z>>$ml zHF@d@rpU~BFV z%&_&PUQ?bKmKNK{BfaUv*`I;4nb=7m&b{Q>3>=FKaC}_ETOrE&w3i;dmb|^0@f_e@ z^Or~7q>um@~e>;ON_mxpKW`iY%`*D^=% zXRh6kEqio^Nyq8WW99h4ktr{}*Q5uKm#fKhwA`%1#;r`k6^-U)K8g+x*S7 zqw`h!ylL$DtaWGNQ~i3;O(){mg>m$_8}s}z1#zt^)RoA5gTZ%JKIQu=*5 zzuSN_JJf!-hJ3z$YkG>lkW8hn)Y~84);T@4^~k!&@W7od!^nOz@f z$F}{gmuCIFzQ4xsH|B5mYy#Ju*jny5L-Hj&1#Le?zb2ULy68;Na~6G>$Xl9TINWw5 zPXD$Ark-Ss5l^r*-4<|%NNWs}PqwyjI00>>FK#C+cNfzRJ_L5qy22Z#pZFUnHM({yNRG zC9{Qx?@RH=P3X#{k_mf5O->U2KP-F6y=-aPJyUl?K2ZGbhvFBSlEO<&8u~Rbf9iCr zvE`UshkqHn6JJlGiw)#fn=WjJMeXKJ6sw3#_f=nqU+KI!$Wm%60CTYQT$_!d!SvcG}aFepaXXE zo;Zo>MC77L~PXg?BcWL3BYe#V`{Jk z_7IzwihtjvG_M+vZ2B*CUA^!Z_m8=|rzAs_u~bW^NtN)vyuxg6Mz%EJi*@Ufg~*i9 z)ySWGll~XpH9@E5r6&11?pwHzq?X}7aF@7F`GP3)j--NJ>!?@bQ+S>||M)2}Oit>w z=n*u@=qc9TmlYq%a4tGvB#;_eVY){4=M0Z5%N;&WT_gL=#*q!A9dT+#Y6Iz!Wr6g-wSn}| z6T#HTww$g6c7xVm`j#zCADOMVh**vCbZ8zFWZclk(8i04m(w<_%PJ0P3ph%ifPd1^ zj?>IjPOYj#7mnE%5#%K?LTMNKUHDzk+P;hJHi$hpfUl$$KaBd;#okw19Gko#2VXfh zUp$ldHy0ouFG2>DIV)?OqIFFg_rz)_ZwuoyO1{-Sp3N5}7DM;MY&AxB$NI|p%cdoE z#LK#?6Bij9Go!J8Id~O<(|_L_OOw9pz=3#m?fp(`H?V&Io`v9}^p;=6??{czSTdTY z-)XHQPoYaYT-N$|E|{9w(Loz;LAJ2=Oh0tjtYbWPP^bJuKO|=5W!i@AyvUb3V|vij zG13Ry=*!oz`?n!Oo+qwCJh2U)yPYw>JfdqUdO~(+|1#nY&_%?49C{GliTvr3O$%N2 z_EN5$GOl6V5Lbmw*{CvFb6~z;*ON9+mL40E-SCpNS8kxMZwHoLv$XqWvi-tZSMhY1 z@M>DTG);`bo*ZHV*lF6J8W(PO1OdC9g0)75r` z>AHz=hFnRmi^jTIz~^}3@RFPJdTJ7zg42-at7==E)&c08z)lKnGKc2P`pS`ei%s&D zmUT}`=M;f|{0d}KQBgsZcPmx)hL=1r@gym_(d`g{D$@ddFi z{Guz?@f>n4UeL0@bVX>J@5EHbcR1VUOAUPHopUZ)V<>INA@^+ z^sj!}{roQnUv_hPYRWe;Ua*aD+yvQH_=zqq0Lm6=5@M{x&JKjE0 zI^$Ps!+V;!|IFMm+bGT$fG?-DL_OO`d=TeevUR-P&&I>bPwJ8^k)1U}o8#zuPj{ft zf7NtH+?mFF&GJ@5FEJ<1X`gs~8uhwKc>XqYsoPUNCb>2wkc?Z&8Qe_P@&1*C@KPz8pbc*QT7-msz7~ z=U00v_5plZJKt%I>ADhs6EJIcJFQv=4B;<{p!>^t-c6rkea8 zXukIW(^bR0@K~%XWvw0ajA`w$Wr1@59~<}Akw@!6tAFLKF-LNFcgV}Pj&~8}>iNJ^ zU6+urzS&u6btvP^WnHHw$J`jXw0B2G&@8SbzvM;-xV*rauYEkzhC`0^t=9gUXX>mS zbXv6z)=R$U$d{np5w4=GVoyffO8M-zZsxssr-f?>oO~YASS$|*pL3?(&D3(t5C2a6 zcMY=W_x0a5vik4Z|J#3!|8Mo*m|w&9kuZNj7UbtTD_8#-zAGVqs|0=e@y;hh>`U?O zFJGs^B-_QS(m8)dKUZY#V?ne1;>4nCA0OXa=<49(YCcpoXt zRUBGX#F@1qjtzzHtl>5IRqNai_T8)8tDNH7LOjQL-!f-XVYndDJemydwWpRwPz=CMJf!KuHi*F@9X+@P&wxWl; 
zYq4Rp&%%*!9$V&R_BxE$F)r=Q(lL9jzH`b7r0>+`o0tQ8aLS^ZlL*dR^q=jIqTni( zkAwoNdcl9RKJZt*XQ|KUg*wUzcElN8V*8ph2k6VuG8r7TA4Bp)bTaD8aj}*3?Feh7 zOhvt`p8cQF-|dfeb_*vvKmImbo;^P2yz}#A*H3oi*X^e(&Cly!XOiafqd}wZ{Q9wJ zM$=gLwtv5G+$E-=hlF+e|h)DWR8+X$)( za;}lgJ|m?elMl*%ZfO{^m9l;F+3OIBfwOaK^imadtYd!f>&5FFdy?U9nvoc z$Rk@bIxR7K9rHxt5=D=cDNtHm!cE8F(j3%o*z5 zo0C~95X>Cb3U-qgAgvjgYuAL5x!|sM_y{}a=G1SQL!QshiCJIhs|BX()3oU`&|@HI z(rZ~0v3dmEDm@}UNCF+yO#d~#hnOY!f0R}id9>=M{eGFvS$UtOf7elF1$KQK^HV?H zb@E-&zK}E9+9i4)zcgca-;X~5c!L4Y?v5L?q>cH9Uv~ZE_J;M$zd{=?ESA4VWwqal zHSp2Bs_f4>7|-wo{Z72oQ)k5iJMeS`zwz_RY~I{L---T~FY%{ZUzBq;Z?E07`VRHO zrYw4EE-hLIpz%$j*-|HY9RE!i7?!v3gK51wyT31^zbAqp-}FLt0$->%CdPO~dOVv? z&Y3U!@he$z5#s%IjJ?-`PV`22a`ok=HA0NT@~P;%<(;EGzT2KM$pk*l#n@dT?6o*F z5Fdo_zqR0hs|2S-@b?nh;HNFxw7PmB&nhdLsr(;i%6sji{5aatwmZszbE)8rdxy-p zL+i5H@TRl&D|{@x31p0H+3fs&-a+9$&WCvWrkzj9|8Z{m>L>B|sTbzxOrx1m zVi#DWDd~i7i2Eq(yaHYm-*6utp{y-;(WF&_vLPG1_{uorNIu3|{5wnn8}*!zJ%|tG zPzz%T*;J#vJWuoGp>Qi99@?Ly|C~KCp6y3m)VO$t$L~%){yqL)``+W<8}E-)pH4*g z+mP*N@S*p?+u|+p6f|gT37*YU>!R&l_XZg6;3FL?ItjOUL-$7RH`+1tFtIYk`}ku- zJgz*)z}d(D&$ICN*NI<#<##U+@;5yA^5B?#=jkP_3*0*a?pxsFMEWr(e~VpZV>`18U!B zUi6dA%T@W5r<**;*l6NB`pWmqxTW4YqsuyD2YYevg~q?tTARzB&s@9D`@VuNPsoTgqZv3yD{_tfKHC+4( zC;Q_p+Sg`iFSzO($sB(@CLh1h^4&4wp+ZBOz^T4DR->^~Z#;fnZpM6k_1wGUH{_WL ztn563Qy+rQ(GSZ*#FXoqH6aJyF5q7E#(CEJ7WP8>^(!_#dtLC7m$fdK(f18?H4);( zH8$k8eFmS-?TnpLU_J64ohwAlOprAdWOe^stwEUNSMj5T*-LelSni`*OOqcn%YW!< zT#orRGjqFuo!g0LeVbJ9G++BVa{|@x$9vZib2puQZpP;+*(G>u$kVKG{x0j1Uia%# zJCULqq4TDUPSpKanS!P95@`UT#)9SbRBWZFHh_ajl1IZTuAL;oEae)d9wH)beEhP!Do8 z0>8OxpJcLAS|C4OJ@#W5etLlMS?7chiw!Wb;vi@u+n9<12|F z?cx4z;+W(^-;K-;Q(qJC{{;B(zPAR-x=4@wm@1dWli9Ei;M*6hKH^kAmw~qnS|mj0 zOzeYX#*Pe*6|5Ec&vEj^u@`RuH*f8dcs55sxY2)FJmIPD%-*QrY#KQH1>!-lg z*9|kcH?&JFCSw0w zC)u~mBoDu)J)^y&a_!7rf8)uOPX;(QjX1TSNfv3$>@Pl~zBx_bz;nc!2I-^R&QsvD zBnzik*w0K1N%5z^sS!FY09ft?9V-*hJ z-y669z37wch91oetz*(N1${|IqUb5 zZ`EQGZ24HJ1#3Uhl=<;I?W_IPrguXrgAn5zi;ro z4jqypO|iiaLqb~o|LPreG%6U^6LJ=r-b<>d_NB137k0 zrI+|bcX?ep^s5bbgq@3s85oDQ(ZzV7ISsSLAK8n`} zo6!1RcrM28K*-zY)(*dI&fIr{Bf87(e>2>NGWV|rr$;N`>B!XfhAMuVll5}_1;07m z$GP6dPjj+*u8sUS54$wMbvr+e`vZAd^=0mDeVKb(U*=x*?WX>YY3&Vr_=PCn%k>xh zWIuHPa}PhYx1O{{e(_=>KZHHM;CJX~LkugiR8j49+{M1UFY?|czi2!@pZ4V_&zGR< z_u+q!2hP>WNn^T+i|qbWe&6DEAHS{q(7WAx0;YQ}Kf@nu2DN=LnTJ( z;_X*d3|tfKwc|4rizB=RGatMP-1dg$;M9VAa4$QsgENtz_HgxZIV(OJo#W3-hS6OF z{daQyS2_ZZN4N?njT6QBei0dQBw*5tb=Fy632YF>QSFAmM~GR}Szb4=9${@?bdBuz z5HO(cp2RCJRL9XF+9z^9_uB)q#mk~;&ix3ZuiWwZ+2`Qr$F5E;BF_VFVT;}E#d7R> z4{;F4SHW(3rMy};2l);W+ue|$Y=r!ZrD)Q)ywt-n0i0Lyk-5lpox3pLnyTzJMZns3jd;)L8lDz(WI5!kwB=R`SdlX;H2V?Ia3XS{RVbzf@f^q6OqA5@`s&Rn7%eG`oy7OJ^iqa_b+0vjf^;{V{1*)>LAJr z$IFOUdhJi@QgiI{^v=KGS>vV@TkHqat@r@f*ckT1%V^g&p05O#yC+ZIBA9~b#f_G9 z&Y~=PA?<#7ox5-lTy8r#DJ?u*_~vPz>wh(+OY2d7tfS>^PL*U$H84~cO@52b6!jC<{jWRdajwyH>+pt zVOJauLyw)4Jv@)F7g%Q@ZU)9?>_Ne4$u8%V9^?$OWGyhI55>niPw!ilw|ft;y}a{t zW^~?KL>uutJjb|Fdn8Bq(Uw=SlNGxd$NqSgx;mg4d%8NG6z}|LY`BBIQM>o>ta3l% zN*^57{a)`d)5Gl^QxjgoEM=RxD@;4r%^se zU+7tU^|cDGZFkY0IBn*v4XghoL-qc4ukF~9=kCwQ<~y~;-A!BYCuvVe6MeFZK2W=| zY0LhCUON%oEO}{ZzdMI`dd}$A-X`qBxTpIM%T}6) zJZOU7w&@CgDqprNH2jUnvjlkJiJzzpJoa<0UOln&M6|eg9L}?Ow)tuAMEQ%y<)1vR zjoJA8g8pcK82y24$ZGYI?vy?r~|MhLzcoUCzjk$;^J4lRIGJ0m& zk$nMY=(T(9-J$XFMfl(W{H%L2XMYTgJNrZWl>Hh*4!V7!@ieA)#@0o(7qu-=YWeKj zz`h;W*aDezMLauJwrM*$MEL|;GhQ?fum`orqHBcb zbLTn!`DJ7Nad0`&vF4hZfjqOQl6ut#ZbdG8x_2hD*ChczVawvnGK+Zktn9}X*p5~g zrsNyQw{2T)?b3rgbY@0EzUr)V#xi5to--!+W_PY#lVmQ_x(Quu)118I7g&Q&Waka@j0*zf7RSzs-E^N=QsFBu=3ajVvrueF1QCfnYg8m&|!e{Ey1x$ehz=%j9_9T z+4hc`_ea~|5&VCxjJ-ykiRR9Sjz8sWt_pV*HmlcOC&%vZ*FI3&{yOhjduEj;_{gE{ 
z_FaYdPHB4QjdPD*dH<}ld$!$sp}vc3^75h&qDN_;cwS|Ud?m{3T#%HVV&U!{!$GzI zHq0KIo^?hGydTw^M`d>T@0-WUWsikk@nC9f-i2L8449|4n7?~*Z<*(>@Qi-6@9*SZ zd_#FV4*niE3C4gwpIXm+O5;KEozP_mcMyN}^u3vSJ{#cNSm0%#)sg!=>iOX42Dr9YGGHXg58*d(-#JbXfib)Se`(rip zUh1y-m@})m8ymhh9BU18jl{rV^~4iK`n@&16fmB?bkalhFPR#h zHRrz*PiwAKKg$#=uW?_D0vE*`8@sk1pBNL=tAEnIIuK78Aldr4zde+_V5f} zcmCB}$ybx!Kwe^OX6!=NeixYIZC%5k>Tp|glU5F8`oZfrZ%iCxY>W4`zgzz21hm-9 zZ{RNimH)ta7SG!DWZ46fM;~S_&h1#%bsc#HyWA9)2j!hkh>m(lm$C{yVoLXxA=W-Z77Ow!S~q)yF6VSU6Tk3Z9d+ELM zDbIVe@^q4?bduTgr;|&z2*=kL1K*rnwgn%=;ok)(cXl|FN9`lwHGq9Y-|gWXsN~CE zGsF0?t?yA~2hhEnX;(fzrR$J?k~8Syw0vDFXy-x4q?J$am2WwDjzg#3D-P@^;C(M~ zCCkaL_wzD&l~1rjz);?K1a%YW|mC%&cHBJ+47 z&-LtgNCyj9yDMW|sI>DT^xgJ3=Fm>gj(Y^(Yy$Y@&a5Hq^~MmezJol8*Vd&MQr16n zOuFm<`BdjT>Qr5E(SZDM>i!6M_pxTF=M~&HIXAx1hrjFfU=iz5Mbuls+EhX3QDhYU zg2&q#mpYed9(^yEYNPt;GAFZ^$vyka)2cJd{q@jMeW`JIzH_+xMJvX%>^b>{{WIbu z*ZPpR);`I|HmzmJo{{|`nJ*c4p_OrC{nC`7cRsK}uJ*lp@>lwldY*N`%MF_pu?!YB z$sUbstSiA_^40TCS=gMldw{k?ej2ns`fR!8 zxAB+YYwxjD+1K$vI76Q?JUkxyF6TgIzst6}DF0Xq?U2m+Nxmn$y61S;IgC-I-$~q@ zuFrFxt`8?${`z`f{qqm3RcoI|w*1VVUmbyN>z-bwGj0>|57i@gQyIA%!NzOi-7ml? z5twSv`VyZ!Tp&AqYBK(`M++Nxlynjc=z<2gT_OVE01XXI;id zJ+kUA@HL2bvL~|Pd}IPR+5Pi*t=Y1l=oBzAbD8`6xs1l713dN&dG@qD-_l?2I_sQ^ zu8sKH)Q6u}yf^r@qnCDgX%pxrzUz_w{e-z=KKD1|vtthVI{x)H@x4ERZ2xok#@FQ^ z=N>-T`cC6CE zCwG%ibJjgBaU}VQBPr?h(?--!q|vs+_jB&m8QPigm!Hg^dczxK?f37{e7OglCD%>` zx9|8exV1BfmyIpBAJ88AT(js{@B+^Fnqj}A))$-|jiKX(SCx3fwxRT_Hbn5*O)nE} z(>v9U@6ryn`F7fYuW{=ol=FOc)1g&a=N}7y;s6yfNmI9*$+5CE#fAb8VlFPZ8h3Zq8>A%pTyVt^(?s!@b%t3;SQ~P#dJ% zi6gb`Q2xE-QU7p87}wIyFC+IS^6#~>cI(N9zqB%wd8Xo`i7#=08-PZ^&R1z?E^vGv z>$d#fzV*l85+jD8tyngC|9K4FOWz2#1^K9j~n;M{R2lbnq zUwLd9@f*QFvVRrv8^=si`b4szgZV)p>kwa{9>MRUuKx+H1N#HgQ(da>U%^S=76{>M z?+34PzB}+z$>!PRe0N|_`S|vLe3RvTd*Bl4E9bid_$LpS&nGs9?+^?;VY&uR#Wu=! 
zDtZqF!S&yqjf!y?Xybluz%nf~ABe{`4hW;bEM)p%J_EW<1KiT`INnf?{Pr8uPd)8wU zzPz+3S;X8qe&)kR5*y<-MzgY_$j+nhMQ-9#JcM0+s01H=y*K*D#MV1;nq=CtWu?C0l-j zP0Cv3;Z^wG{|&tIz^fj-2JVOtFK3>AMMd62|G@nITPD2Z^DWWp-ZFhlivHx@rEe9t z-+}$VTIYcyr{rUU1~tpEBibgLlW}0j7bNUlAw>SRGEJiEd(#oEezf6 zg>JIz%ea@E|6|)9-Wf};v$l1*V9aM{_|qAUvN>bwAM!64_4A&6zO(vBI#Xx*uz;{P z{xETR(7FkH+1t0h?O{{X_EjeZpQb-b`x58?7nvOAyam=n&&t0#lb<>d--Pc(eS!=* z$@d&M2aj)4aken$3hvNV@mq0V>)eO%B;ZjWF{c~<75Q`aI!S3~1#r+W(Y@>q&^ieU zVJ|o`{>R25=K^nkgr7l=C?=x?{PqPxL&Ov|X18f^2FG1*v>mzXJUFH}3w}I~2A}Rt z;P?`Ae677mr}=n&3%ukH0;j#cy!t2Fq5dQm!`k1nInG_*^Xwt|ckgZB?(2~E*>>oB zkg+*AJXPFLV!HOt;CF#y!rL2OqR(=e=MkGnoV@AW&Dh+?s`_?OU;(dieO?GUx(R=8a)5O8qJM`Z(IJ< zTf>|e(hKho6kWOGInLS6Hyy;{v^Pk0%$j0mcH{+$@n^2@LmuUu$M7@dukWUweDkEe zuRw40=F<4Vg`YQBCT4(Rc+Rqw9d)8i!%(wk4;b4-FYgw zZMJNMU9?lQDCa!2KJLpoPt8GZF;`yS%(L1T1^=bMLO-H+XGAOCi|k`PGUHpdX_t^D z{j~|ak8lo_Vf-wiTci#2WtHfI0?9U&VXbwJb<>ZQyr8EB3^&$pEVF!F>*QD()jF;{1M9QTwAy*&bQVn_0EuRW?EcooeS;t4>LFwbB)8-@N}_oNKsBWRB~;}N9HW0 zE%2&hhT3V{zNfr1Rag^j)!5n>2()(7hNT&JZ|l4!U@0bny(5-?B=cRK6GR`HH>S{cDRp9tkwK5S)2`zT^++`8OwV6us+Xuv#mO}~S1yiEk9Dn6+nu}x zE!2zd=&E<7E)dFL&%v`|UE}rAIT*#SZ%WUUc*@6>tau=vg+xbqm z_nc$jOP0Tzn1V)lt~?xkPJE}ov;D9pD*0{OgY1?(^<}qYs${4e!j?gX<9A%wOq}hC zoa)6H8L+kTJ>DEhwxmO!J!AU_8B!FmeS3swjsNN3KL`3SN7?Afppxh(g3Jl#^356K zkO_U_&K~HTiL;5mk9nD7lEKay8{;L>BI=WDi6H~9S@4gI>9eC#e#&<_9lpysb{_F@ z`qS>6Vh`FJ&fi*M{Z2g{oGF*(-yXjYz&rm{bIy^rU`qOAq%9{UnKv?z`3x{e);fIi zCxBld7$q(*nhxgjeZ{=!6#PMo3#$C-&IUJghQH+4z$4zC9L4x#$9T^npKR!3@VN3Y zwik7v8_&r{TP5!%dMEny(;m$~{|ukitJvsGz(DrdHHLp>zWP#dR`f;oz8yoBWzU^i z3$lLufwgrtamvX44}gpG@tM1RHlbXb*njKo5g-;HdF7n}orXSwa}?STcOTV!b3gPy z&h>OH?_xa%J=^^i>0#(|{94xJ>APOa>0SS}4_6Lw&fq}#hG@t!#RJRuF2E8CX$N9*x?Z-?&&YL52i`fDm*EV#N-YYrpueGg+|a7nDI5Lp|^mCi0) z?;y`kIcBk6e(^?AEEv}WFJQeE8>QTIsh)mpP#>-jPr7zF|2k|q?O;oRM zTd90@-Kw)iKHIE$<&jrL7CjG~4(#h);2>K#$2?<^5s2_<;!Es-Z)hG1~dx4g|JDXoLKOr;(FdyI#Y01yhu* zZ6UspbF?~$QL0^pJp<1(PpmnO@8tvHd1t!aXX4`b_3K&A7^z1Vi2spMhs)U8rZS2> zRXI!NI#2e}-!mTOL_(n7M*OO1Smg+?JZI$2ia_nH`6YnW+H~ECWo_)BA)7l4( zmT{K6{HLS!fX}_=$|d!zb&HOoZ#}YDxTs7Mwzl;7`PRGscl%r8&+G5Ptp3LC0DsA$ z^Y(Y1*WZ3$tKaWtPL<-k4E1*(dqX<#jc4`uM)lh@v}>%t$LdjkCo=u5e$Vvx#v1aU zufLa$>+dG^L8-qZTt!#4t#WSRS^X`Ws)=!`_Q|eJQSZQe%|>`@qsmAYMJcnKez(sb zBaWWumf!e1QTUMhK6293$lgC6PgG=hq5~hR>J%>7JfY_dPi))(ox~G!`F6k@kTJG^CY^U$;Pp?H>{PyDy{9LI(z`E3xHTNZZMrylLx=Z#)98#ou0C{SEdgL=Frnp1JB4;HX@R zas`wD&Z0c#=Jl+rxW@V(i>>cb`x>w-G%hzRy==>c)LGAXlAd55y-xl85&9FKXL4uA zw91Dm{9BMW_&1Zb4fHSHyJ+p1=pB8%q3*JSnnMk6rmN0S(YV*#SMd!S0y9_eZD{Ps z+1Qb@SkIYhV~xYuuP<`OK_7UJ*9$*Fzv>%#!lT@jtT%U*uQ~`VJCLU>=(?!m<}(f& z_%_09;uoVW=(%@VM?1?egg^Sta`}b*d9#T=!5m(;`f29N`1S4DM+zL6UvxdrH3CnI zrd71@3BF6#OMkm7uVw$PyM7M(F%Az%Pxv}R`a=Eh$*&76uO8jmZrB z?{Mv9j!qm*schaEoacuhq4=z_oTc)uEIjVcloicoZ(<8td%4tGSHdnViLL++HhC1h zOTU=O|J5uQHQX}~vVOfc%lTdx{`Rq$i_HFyRTHgmdTaLB>d}7a_yY90fwSTJY15}N z_1u(I4}syOU#u9T*$cG)XxKFNX2#9?88@?_-Arh<{H~3|^10`~x9Fi2@aPZ9&!*Yc z8Ti+870qyFmfmo&shW4OyQG5kt$K7abC<&-j|5VmWZbp!&FgOTR$GC$@1hL5dSM2} zRT&uS*Vh8Qt_+`HWWHSjXJ5I@)S=sMO&>oaW)%`JK;Jd5}Wp4D!ROT3w! z_fpcSajG$@HK6)IPsjf8?AV=Q@f^A@0$#*sbP3i9>X9yF570`%oX41c6JM6z={fov zPmkO3_)au_)&avW@0S(LYGAWFw`GwDYUcvlxeNQk!k}M6b3@jz znxDhDP4f9R=O(qzBl}VDu$m)Xtq1wkLf*)xvAou%!S24B{_(%5rTMRg zndOJyMZfax`1Gfs|KKA_E30zd0?nCat6P}ZNPhhm<_5dw%Xs2@tl%=hJV-u<{AoAb z*~_>52GO6TPN+cb5uaMR;wQd0G`x5-{49LK(B>H9>AP>&9XT2dvF=gA*qk2yEP6x! 
zxjO1+&RCi*ErS!3)-}jxAM>EfsfW zSUp*Vp48b2nnwxWCzz}J0vSA>rsbs`KI@^aFBku7hRz-EgvOJVU%)Egy?%?_UWpA3r@6Y%+6tDWeEPUi2X%ED@Udep_H`|vaJp6bn z&O4A#-0G9EOuH`1f&*`rnxK`9_eq~9ru}aET=?~BO$phkxvM{qmaXjLcO9~9K=E3^ zV5R&@_@vXv7!yYsNB!{ePqn5F{MT83kscf2>i5@AxcBJk#c=uSrCDpG(T`839P>8j zrT3{1X>$W-NSAhY)@l9M_G^8`GMyK*WG8Fk;?a6%slIctI85H`SEhi)#2kO?0mW;`beg28V|G) zKi|3giWECBGO{x3bd4)0A%~W$W2PD>$FvKE{1Ej@fZ*cQemz zVs5Fr2u9i7A)m|qrCs&LU99iGHi^Ek;%hcjmmqHzcax57(ZD{;^(k-`{RL0<#+%?L zI=sboydC88sdQePu_62COT=OH(zXrY_)F@a3BFfwe?M*JSH-+#rS`(QtYK(PSG?Ev zuOkm^q&(kVYWOWa%*!X4WL2=c<`&{3UnZupm$l-}7n@c2ziPSTX)|;253wuvU|(uo z`)2ZphC6@EI&7eP!Ry6|*}B&^a-c!u&j0SoxyvY1MH%JC&g>L!w}X$?0-A~21s9WdKBJDRkB`8c0V-SbJ;pL(k1#vAgVo|EzYg1~fm0eBpb^>jPY02;{Gq zy{~J&v*J#4V&i7oB3b?XoU#i-+>0-kI5VP!;9Hu(_n+yngw~U3>#NYp`X+Ri0_9(# zjcV&|uK(!FTq55z!8Yj*@{RXRw-84_opujj#ET_pm`~lFZ*xYJd5(<>8o(y_2jJCb z#?P0KIq+Js`bY2|z)$z?;6G;1Jb0IRi;2@J{UgTKYr*L&RL{F9C*7$%1ryuTXYb?M zMcW60?OWYY_YQp@>mGE!`(@{$O7!@1$lYD9T&*~SlM8jOq~zsp&!?k!JBPlJ++_Z? zSMN%B=OPoD;63Tqi#Y$na4#6jpWqu}Dq|?qO&pz{MppbhHjO<`hqXq3O{4RR*vRnNo|1pAOP6r9xM%J8VLgFwcWOOhI`)xb;m6KB z7@LE5x~T)%7eap6dGa&FI1^9ppQW(s%*ft}V12)jwNm~jT1RDnhHRP>1wG|SVrm+$ z|4Qsg;^bG3gj&`=S#Cn>HKvJ|nYMbbY1Q|_-RqHe*LZSc7qpVC;+_bs)Hl}ic|O3{ zjU_kRZ-Z?HPMkAh!Y%P9C4aNyQ>DwUgf5Jcy=s%&@3Pk%SocYXmqbe(8_!_R{CQ7y zzC)ROeE(N@)JJ~It*@_UXXQ5;xb)mP-wU3aoi78|WPMjS01r6eRf6AUbi84Wvu;n+(vx-ebThM^zNvHfPyevKK?p#=Gq&G!@g>pa@1o*A3+lKt4j zZ`>V7&cZh#*`)dPFny_gOM4i9ThMFkkR_a@@^qN}DzbH(_y#P$y)SSko54pbTtk~K zO>%uKADDyloYZ>m?`qCT{^G;2Cr{y@61;8nb(>-$u|uk$@7&p7Jc_#Iz+l+ zA@~l?t4oO{&3yM4+C3}US-75Tf6U#T1HHX@yvj*0brVNen6u(Q_Ajk1a1|_EnUJ zgU?->IbSH7?`?e*AMom{`#@54fpdfSS$uFxbRJiiUq_gHzn|Z|c4;ny4A7d-vw=+? zU|*zNL)ZE^d<3oiDNe{~~^YW7sy>V%QU9=p$AnM7?j}TT#2wk6G`E6tDT-NN9Vv~)yI+Nz=Q_Tc6dep7F%DlSD(_y*O|TFNBtGpl&Np`Sbe*w@9+LS z^;Nu6eP`RBw$6U9F0FIv*>OIU6u)#+@As)!u`G+uYUk(~Uf69V_IxhQXKY*Ryt@5Z z9KXHe^~TsfUyp|sr)F`f`_S3^+w9>ILoeTx!R3YtaKVm5AA0<_3)+w3)9>jS|J^7) z=bXPd{}i>A9#F^9dG4uQXR@Bb9~H_Ca%9q^m>Yu4B0B(G~NPf(HtQ8sT{@-yn$_AeI(v~0d zdjelyZ$M|dy6X*bJssLx@T(2O%hA2^tESkC5Gl)?551_!ekVqG40-+;I)UH(CfS#q zxv1D`zr4QtjE(kKeZ|=KyZv(C$49IC@?R|+-b4H?KKyC_A{z|+egl5;oyj+u9qT;s7s7mShV0Ct!Z8Lq-sQT*Vmu23yoH>rx_Z8C*FBFG(Zl}yD z@EY%D9qS)I?k>|7)}_M44t^aPbkH{?>@5_}MV$7oFHUy1Ji!`Z8*9qD=qKURLEm(8 z1;0HZA79#955D8usyV;`bK{b1d^d2WfW9@^N&8c@r2|+$SZk6$n2XAT>F7sd7ukPICgb9*DgV;;mH) zGSw4e`)vRfw3q~@?Sb^-5h^uI8!o*hvCe1(CGB(oo!TJUO5&wc=Q}2#^@Mms1QIRg z_x|jCc5)Je?acgs|NZiMo!8#kd+oKJ^{i(->silj4R{KDTt~|_f^XLr*&bxaK(A0k z%u`z)=VZ9?w2AAH*3&9{NF^(}ILHTI8t*Rj6r4lal4+)tNhj8X?uwX~Nv&6LuZfJ{XNiItIEBR8q2>DjJk=cXoT%58P+OTyc#C&YNiaA)Buon_swtO(Q zw9n-`dDPj&7$wSiYHzSk&1=c)ZeKVrD*_Ewf{$Mlm!v+W6x7il;UOhCv0t_K8j5Qf zvCm2HFNJ2^e9B?TjrC4EnfBUR*J3{pu4$H!$ED0&@_l7$J{kX5o`pW5DLGiP>6aU@ zUuFar#pi_V4ZHAD7r)BhzbO}{Jxg;$K2NGYCw~m_M|Bwdnkn zw|O*OnZe)a`#fh|X6)Y0iNkvCcCXFzi-1`?n)r+RlY#pR{I?&#ZqN=)vAe^42Y_j2 ze)3je>knWbTQbHtMxQIfhoytSNBgi7r*IF~_n{5hIiQJ(O4IQ}4|3HWAdhPSdnBzR zl8>6{54zQ38z^hVpR1f`a2q&beM-M0+vpe1tRHMI_wv`EB|m-@+rg&;!}*uL$}`>H zO?eByN5Q1N`ru(ztYZah?(EAw7oTrr8;Wh1urp1>x0uXOaDk4W$N z4Cga2x9z$7DmkVN?uY3;`9@>4FRu6e6!AL|c%*d}y{BtK>>6vfu(s{WGmQRe)FJ;l zaMtn9`1xxsa{0oU`WeTMS^Kj0ea5aA`9<*8iGb(FxUa>Zsto#2%&z9|edsHDSQ`TP z#1z=R-^ta|+koTey-UxRPqFq}lDAuLnrv*=x(Kdn@*b^2^@k7OkI=KB(^?6v5$0K# z`tyKSx||SaRFBlVU(7dkl0C;w)c9x)n#1sfCqSwy-tkp zYqaminw=mfT;J@YPX&Cp_sN3ZBb1Mvv#+=r{O9pZ@w?JHC4MhF8r135bA)=feK0Wl zWpXEk5`32q@vZvrf7pwghvYMIQjX9+5&PDGY?z=Qb9_^@mpMYb0Bg@Nje+vCi%)B= zF9t`zBYk)IT<{>AS>O|#wZ-lU{u2M3-X6+520X5=^K0BoH#ox{GxET%H=@%RGyLgn ze|_~wx?|1bZv5E}gVzCM3hA}K3Vzil^xEd1Xw-Za9nn8gpMiXmMtv4zXAzSh<2(Op 
zZ_NRCtl<7hZtw%atFd?X)|su}=^WVd-2^-Z!B^}^+%AFsB{4ogYkDHSRsq8u$Z`>U zKCrFtVxD2!G1{~~Dxaf#PTs(tEFW{_bksNPKAHnazEW`{$_3;LqVRj(qCo>38$xr72Dl`AV{B;4_?G zqP0|VV}guOi41iy<<68BSv$_+AMhdhRl?MpLA{j=p=)Hp-;)zn_s!6u^_%(q*ynrU zEt;2)SmYhi|8Y--sLJ*RwOJkb0lD$ko;J6z`BZYr{FWr^D1zMKy97ewGe$1&0J^|1E?Pz(qBrooQk7~VZT}0m#*F1y2LAjecyYMZu z`^X_p{cLRM?)`M=-Mudi_Fw4U|9!Ck0{6ZYx^eHjOZ%sAe}p-G_^F}1IxB^n zl{`Dbv-J*c{?5jYk3gA@D<}gW>*!=n*V!0aXXo&2AM+w!Psio_FJ7*^ z|GIt}eCeB~`NoDd--(Bjyd>F#y6J0XtLP|^2S+$NWF&rp|B6M z#91iA#_0w0Rr=<}vy9VdW2EtM?TMCbR!eiDH&bsj<78~`*z~s)9!b4+T-~q1TU~s= ziOzSk?2CE46Yd7(!-Tv`;P#Qq_;~YO#WL!)B!6u5w7K88_T5CeUGonZ>~*LLzAm03 zdJ$Y!_kuPbc?Eg4T(DI#$B;MJFAmv?)vrc(n45OI1v*P>JE@uih zBXc1)6$=LKW!pVQdG7f}#sdH3#dFR#()w!2anJK5g;&2fnREWS>Bl{kZ>IbK-ua&k zzE_mi)F>Tk0q<28=UYS-=dg%^WWVNZoqyRsHfrx$ht5Fr?G*FPODwG7)L;K{u@9f&8~k6ysBKeVNiH}I>R@%5HB zwe0S5;J;C`rW!ab&hu$rTd>u4y0SBzObplBF&Ve7ElYvkiB}w#^4;CO$>C(m<72M! z%}EL6$E&=@F0ZoT@#LHEjZ1lWj8zs+--jM8H6@&G6qn!&FWzSv_(1|wJ@>b;7Mq-h zwE}#_cEh;}_EnUC6F@ z0{Y(qt;}VdTgj_12|jZd`iT&@A=a+iRR0pk9^aV|G`^&2X3->gl>1KbH^+IucqaI^ zb?6td{&ZM(pp;%zNk1V`eLJJen)~LT8(}ch?Zn*+z#>h>P!=6Z2YS1Js#p zUVHZoJ0iKOz)AQluvMZbcIwY)^-}jbbgC}DzU>bmKbD$*LvzpdHy^3jcJ}%)=CuL; zlD`h~+Rkk!{?)@gJ%X%ILf$v=F~wg0(VqWf1NJ#%;$zE(_!v5qfaaj|6`Id8na{7n zyGZ7=s2#tu{T^p7tsq9KlCfz5FOsoU9vjr6k~HJ83$|Z#_cPGOfX7$8k+BSOCQ=NW zMeSjK(d%>Bb1}~1nQF_4zshXA5n0gYg}na+?_cLVF>mlfogF+>PWF89!n5hUUHP++ zJ{F+ACw44Qi>^U={&oNhy1(K&^p~!T*&{vxZCX4>fA^^WJa(6qTc-2hsKaL(=(qhBdpzuwY%<9lC!cm29Q_EroY^{61ikOb=FQr zMx5J}8(=)%#3oZm&VdHL2~$?GPk^#lQ?@DJk!kXP0eoccWF9j2I;{Mc8yUkx0W)Re z5@anC9rVDHPP}n@df7(uQbG*#=C_ndTzd5G3P<_+K=3m3PwIU!W(7#N}c6 zkwxhDQr-(U$gi2tBDeaG6^SM5)$=-@%O56kdS&NP^3asg_e$b{BByIRP4My>upNO9 zN#A}GG`*Ozv58l9j-yNrd11h~=e=xVFZceVS%bej>jQAD$z35B_htesu~K^{nKjZa z#*ho%@Qw?dL=Kmp@rZI8$9Wv{9UW)Y7Ih_%VwyVo;h#op~Oc29H1{v1J{~+5F)`hd)SvkO$BAm{}WKjR>^aDCCm(I$W%Z@&N3GEBVgRp)~Kkvg{@5p$=ZSwB?=z<=y z66oQ1<{31WE;=Oy>9-W& zj|i@8eqh_jz*FMhGf|%;-3EE}Y`^|6wh7Vb6{;xT_ec58S!_@CuYCf ze}1%;GNH7%zd)MNw%oP1{;;kbACl$U;RUj_mVFWZpkr$dB-bos zcp$~uzcGHrvJDLIOuA3@XKms`@TE0h!4C2o&kh4mKF=gyA0Wq}OB+z&9fugF2x~8E&R@+X2_JNGNftx%IqsQRBXdfk*JJOS z6PQOn6gQ`22)k#ZP35iQ16#)1{2E3a&bGs+et_naWdwdBI%^A&e&wVt>NdMz`G{}uT`-*i#8__h<@ zAb)JDm3`G_t%2R}BmJ+q$#$M0qXbrw6S|$V%EH8#9KUW}OAC9k0?u~GgqId@{!}mX z)yElBwPsfDTkAJ8o(nwV_&tn%eEBzneP`P<;$zV89qKrjUz5&_AV=9$zUwA0g5>_6 z9O1k{;K85FJ})m1_|(3xf5KN=dA)TO!6|%lyI6N$0j_oQFT@_Jhxs06y&Br1dJbB& zv%s5kFovz>2z|__edX@i1#W}%QG0~Ds%Q0W#$K;_bIhEEdSW!Q!MFS4sqx#@8SK=w$Ja2d1nzOyA-@9Ox$ zdyenx`}EI|h0bMPHp$i@{)76(xm%_;^B{3d$TQik+xY%wp6BtrYDcg|`dO`6m5fgv zYYDMe9d*>p$>G@6ob~2$%2)B;CwnaNb=3}|MKNuxJqI(PH=ptD{Y~VJ<^PHY$Puf3 zhKG8TS3F3a8lRsE7Uud+uAAUXdbZ7emJ#@^=H|G}hRUhr%O02Apm*d??Ej(KWxqNb z`&wB0)_mG#J(Ev}Y#^hJ$)Zl}S$=J=Hz(rfs^b*VueIm#gIar14oSY}%yaF#TOT{u zxL$IR1#MctF&3c%TwKDQj4`UH`b2x<0b=-^xrdwyPW&uGnh!q8m-IOC#1ZI8_9fX=neT*{5T(YBbj^v1Pv@cm^w0HD} z{CmoimD&269_pDN?AO}B+EU|=L03kkF(kdeIo9^4KfK;!+vH_u>I2`>7fFXP5SN4&H9kBzo#rZ+kPIr;=Lw))2qBWmwPULkr%9l{5B7tecLEI(9N1{hsd$qwX@+eCQ^ z3aK9$pwFS4fKSrK5q<_fE?KGW{`xBlTWMs~nd&XjomYGjP(C1V^N zMaDRZ%q*aHQw#iQ5XePpt3iuOvkuVm)b~o;RsyqnQ+qOyA!eo?hSF#6A-r4D`X!1J7&M z);HHe6Z?5~J@u%*Ox0)WQSo&!;iK%`kYUa^`^GJkN12E%ClC3qbjM*z;oS!6Qaxe( zUqUAS8T3*Am+^m9+;6BV!Eb1v^uXXqy17K3dH;RmYYgNq|F-dV9z6dG<7@V5t(9fsY_wLQgD+qmMnQ#JKaB;9G_R+xXuh719 zaj)$h7xmJQ!+e|1|B|y($R&H5-t&BEdTIabJCmZ=r`wYFU%p8mt}0j0x-z_+HN0HV zDR&p;{ttLlj<~mJkJ#OgRO)zxdjGuBiXNCAT-}4*Q$>!qvZ~vP3YN;J`Mk>umKw#P z%O3I=IPY$azng5olZ=!?on=**+WF-ehv>tM#dq<|&f}f-HNunH3V#zl-&d6H0@`1T z4S8Nc!`G1O7SL7!ZG~t{I@ItO;^Qe-kWkJkGx_o=)0?Jpm++n19#7k0&$x#1UrCZ( 
zHBI{?wf``FhKzIcLdN?zyk2v1(4Rv7!?u3a*+Yun4^Qy4%t6Ou`_v+TqO;sAdY%ry zR9v+7br$m;Kk7DQ>guJ8EBIG_lT70?bseuD$H`COII{E!_?N!%dc58EQH^}`!{_y z*spv++i346_z~#;X814h2EE(hrH_GQavIwFJ^ay-KVl60*mc5FRJM{jh0kjG{TSbG z`Yd`lzJs5nRedxRN1}Efp`A|JR(bJ|MBl4Ie8dy=kh|cskrg+~S6XuWeHrj(7uVQ6 zZM+wn=SOW=_D_n2KVh`oe{x_8x~W}%Pbt2`Yebi|!{4wMNM^Z{F%jKnrekwpJhDskHSfz)&f!c+@>Vq8 zo3hncb>|-9{HufTKjv=QlB{^%3ix6Zu^mC;*hAurr4C=L<18BS#X1XJLA=aT&C_$x zFT%TPbGiPVv4-`htri}x-1ov~6R>=p@wDr5c`WdX#%)=e*poZ#^&x9;eW;`_F>kO9 z+ooN&Y!@arOyZ7;@tKSkpbIU4*T&$xGt8j!#8-!a#Uid~Iekgt*Sv6f5qx2h^6EA( z3}~H5t7eT54=#WQ7lIGbLi56^q7wY4E%SWJ6&5XnPglZ!(|}uLD(RcMAJiEF!~^AM zOsog5YO~PO8H_2;8JTz4eaIQ?!=>~A+&@3o%*TGq8ckhKbNvJ3-+cKwTYFP2@~x5U zS9)FgqCPSAs+;M@=YVVPn53F{+{YGv)rNh8cr!UGV+$`XQh9h?n#vzw@4K8n9fj|T z2mYQukLO;rwQ7e^C_Mic_5<;K_2u@{~C-Lu=HR078rW>n; zrzri@_@#me`dAKaMLT|nu7}QqvryDi& zzoMrCIb?c!ZkT<$znOg)b67G!8aT5$8}0A5@m;}-TkZQQ?!zxR_v)J!U#Ba%ch~Gv z>diOKPY{$lkksRm3BVLogdXgREqrORB5X<(IX@}m!X4XL;zE5*`ryM%&KFZyvVVkn`wHoVxhSy}Xmm+ZiJTbd&NasQY8&+cQs(?{u|)KBQu z=E0-;`YeepC;=D{p<8QOE`{=|gH!8^QtPpIAk^yPJT-_iF&+P#}~ zFT%Y=pEq!ZFnqXQad`#WC+UCfo5)=w-G}4DmD!qv{$KQ?K7N}zGN~_{c0?Z!+&#N5 zlY0V^W}Wb69o(+`)<#zEG;G0^@t6Bc(BnNx?2GCXE|0O#a_d?}UGEdK7@=L&m5u#Y z<3k|utoFE`H>|l4_fME*W*@Aheq`;A8Q@cM>JCR|u6=2y^s)847cHi6u0|!gZd-qi zKb7Xw60cFS1mB)+??!y8vRc!fTv@Xpr>>CWM`7o%)Vt3Co8)KRFI1nISK1pqL|K{c0Do=M1C5Ec#a{3!D= zqB;28U?1yXwB61*2u*VT8hsazD~S84qCBxhyB?4&!<5g5Z&t84J?JTBHFqqqa(_3{$mG3h28foY?h`H+}x485PrHlvr1M$srVpD5^@Cc{v#C_D6=b@X# zlNuKCB{U%VTM7MXPUeAAE4Z#>mB+$2GP4)IoQgZ1G@{pGbC^V2!sAaGhs2}YaT?1w z?IFj4;P1zG z5E)kTTQha~&CK3>=ggjt<;*!(wjb$J=8V^w^CttkO@#Xd^9A~mO#DbTW7f`CX>J>g z)$XY#x#lx_m(sS{`8@57q#dW7M7%k=Hh55AFLBn(sQV}2t^mAP$O%ih&*R?qeI4{c zb^PGOOS<^UZ%jDei;jae=3T8@J>^%m?PZ@jgZi@Pr$*cFUfBoCyT&mN_fvk~-81|4 zF*cj2TXod%OuR>S<{01N3!c_fK2~!is@OT5Ga`DoX>63dvU!}HQ3g(`66U}l{U_x6 z(_UNgmB*9%=Gt||^Pt~?T(3On+C$Bu{w(s==8)67oi%0>G}SY8($+v$u%DRgN73IKE#@u$dNOdOSQyWZ-YkJchqPUrBa_oE;k;B9UqbE{sORd8;P8oB>R|}td zj|?zp@E!ej-|dEF`C?D#Y;?!CD(OS;%bcjWf;Z(MoJQ5q`ke8)P02 zP(6f0w?eA5 zInVgH&zg94COpIQCP_BjFy3q^csF>z)*tyAoLC=ee35mg4xf(iQHRzK&Ofz%I#gex zPHm*W;3j1FZt7tT+M;XaVf-<^({mY4S5q`Iyx z?f*3_<0m1%iz`>%hz&{pL{^ABAZzCo+3wIm8|;Y^_0YFCvU$05 z4h!(LM#r!#*$A#lW<4w5D!q>OorZ;6%@6tfkPrEs*x)sKpJ!TSjvnw=Lwdkv-j@FA zq?Qk7IQqaPPs!e49O7K6sP@I(j2Y)C$*)Z@bb$}ON%$0x2^{yH6Zi;U{8h;1A?%Zx z*m>~VtSLl35dY05&yTAkaQxAg=RY&Bp14r<+<`m!PV&eV^yNi(c{BbdzEbvZ*y!@= zn;OyC?(!QGk44};)~9U0zuv3qTkO?u{BpwSD?19^dNK|T?rl{4?bw`lg1zi ziuT#Z>}vk?*77iV>y_xvq(2bN9jD$Mv}Mw^WUj^Z*YVTMKp!E$`JC2#`(0+>dSI;t zw;}26>K!=%eJno3lkovieBZgYE)u#9sDAb5IPHc#OZK$THvZ4mOM$s^o^j~qqwMvT zIlc`Gz3kD^nfsJ$h!}I>g*{>vyhq0Z&xiOW^jBX=3{j%Z{^>z`L~<~m+Xxvp(a$vb z>%snKXn(XE1<9*F=H04~c~^J#cj7In-4mk8U5;)rWG?9=2YIi-m|YF+J_{@x;&#ss z33ks={mr%>;OhD%eP=7PuwHs7V^K9aN#nj?|25$EEbDbE=YsZE#d)2@+8`S0LjDP_ zkPQHNoZLjv!R*Sr?ffCTENs)jK6?dYQpvZsBG**jIs9F9{M|!(M?JU2|Njct@H!`V zm+FN+XNSMVT@fwYTkMdeOdS4bMCL#w|sN0uJ;G~FK3@9+~t6~J9m&b4!-pw zv=i|Ne@;we9lUxU_;X>sif{Mv>@!>=>u&CA=UpASj_TrNLiFd;ynAUXXR@rba~>`F z0k{*57x7%YXXGAm8}o+m@TEwy{hsm|x8$#niNjUsbny^_zmfb{ z^U$qNwv;YtKYVkkoo6coeD~S%V@7Lc93I#1`&Zx?W{)2Yj;^ur!J#>^=ws%@@OQ)K z#PD~+=fv=L!{@}Xcl7gbofC`xhB+~;%*Z+MOJvxQa?%0#pS{jHwlX_boB6Q3lCjvd zki2&22J4`;SKw{H`n;*U3iCYC6lhX>Q|~_D-DA*|yZ3cvj8!@lTH`)UE;Cy@nPcMD znTB+q8NK3p`c}I73`21f#Mv_TtB5U;oUeTba$1{g+QOG|n@DF`i9SW|&*8oN0+9(j z5@m{!`Wy9Ac0u|7T?6gK?A-P*lG{GJ_eo&*uNdvb_r&5mV9lY?2 z#u!=2IMzk%kNTu>({n%1^QhMu$20XmZXe|@8)P3MUHWy@e?0SZ{QjwTmexGUQTxHEu9fgZ=~23vtxuckH7KGw1smpU+?@6JRRC+3s2Vm3!_FpG3SZ@FIB!J z=e=PT*nUDPYfx77=gOnDt$zMc4wo?ftl}p-K6Xsj5=W;u*p6wm7^c~xI-GGC?nmvM 
zf9LogF-~k>>pCV=-%Ngw&F8EdV(>14K18EU=wnz9Dw@*q%hWZOK6AYQxSK9Puc&9A zcFrx0Tx%?nuCdv%mjoUMwo{CMK4p)gkCdK%HvA5}q|49fZ0F9kFb6{7Lks4k*R&d> zZ^u4CylrOnHs*d|spQgRTTdq)_yOeb-@yxAef&D^6|*-NeA{{$`h2d-0nVzV?Hw&(p`r#AN*%eMTAgku-FG#Aey=`CfhUeQv((2a%lSYq0fgi{|%- zxNk*A!+xQ6AN@l|*1wQG>g*oD6~kZLh5asI7i{9`;88Zd9MvzK*(J28`6m8DTlRbt zA6g1;H{iSCNAe3B`JTBL&^z^$n1{iBTBpa-Pp5y||3X}V-9Pea(54H|V&D-@r~gTG z=;-mqS1*_p{2&I5OB2TBJnj|WsC>A$FlLk+JU>Y`3C2TbCq(nzvy)h#78<^fJy&S~ zb%yXyve5A*aUbM9k(W60Xmr1IYae@-r;J02AwXv7FYy}vmGM2@eqao9Hp9R}#5t`e zo^AqqxAEv1zs>&*MsZ7OU3oL{@;zJlf0I$XQSW$m^!w zPH`7|ilaX#HidQ`B2OV>Jxjb|UzSmk1MC^d!P{sz#+VntGkvz5#*S5)wE){!5B3Aj zyF57eGH3sE7yIg7_Ief6Uj%-DsvPX)3fc+d}s{5+4ww!eE4hQw=aEeD3yD? zhbGI2Uk=j`*|=4&Yr~U$o3oJ{lC#)5@m{vfZP4Cv`g1Mu4`JqKjQ*Sehke51dexCN ztd2^`OeOz^;ynKk_^gu;1~#c?`Ug)d-{8q6kBF}Uzpa|0G{t;7u^m%5uS)9^4h*~Z zF)tqA|0nplvWem-iowHd>UalQ*BXD4HL@H3N!b>)c9lXqo2Y*hw%U`x+%BGl%yk&Q z04Gi)qxDVByJ-bl3LoW+;X`X}nyh7B_W z|M29`9ejQP{1uv-eHeP#A^6xUYwerDbM&3Fk0Tpt{T8o47hvaJ)m1!k7BmR|=*WWZ zy3lQ8a3#hM*)Z5YQP1$5Q!L(9d(ZB#KAN5HcTdaDDq+2`^LfN|jh( zA+&pTy7hfWbeqzU5~tf*@~ooI=(w|9%jIby3plN0|=*&iq?^`HR%vUfPom$-(Klt*;_)Gq-06*L&iySimB^5;_ojEG)+4QRf|YY{%E)Vg46? z)*9bGN*wbmGJY=9|Tw$JH@VJFbP8u=0W0)RuvjiYxIJ z-*6D!+1%0MN=lX2cGl6E!*wrH@%_<0ux?7HTU4v$apP{wfMd`6Z_glEJ%8=jr=M>x0jDd4HY z-%NftFJhOs;acyBP7qwzjRMz^I9!WIhwBLR5u?l?TqDaR!ZqTZ4VPDV%4+?Q;DYbG zQ622>nRZ=^tyAM1)5D52Tp#TJ^QC%jv=m^gvbvLOo6zeSR~E`1^8WnN;^UN+4fNE` z`Nb!xLvpQquXidVo_v?)0Q2?~b9R+|pV9h0_pxPH7KimeZN1C?vTMm#?esl|&YIUa z(Jwsf-yA(D`x|tC#_nS3K)z@z8b{@{6L?VabZ(7wj*&;CJO;$UnQbiTeOv^-*o3#g=mK|gS0cUTq5m^c<0P9CmvutwDbNn@}@F=@P>5B5bWCn z;-g!Do&E%3%iNf(%+}T5MeEDyd&qH5y+d(4XV-)O2%HT(b-QC{e#AXHT6Sjl zjNuRAWvk~;ZTN5|KG9QsTl;bhox9KdSJ5E?G%9?}Qcb8s4du1C!1#lj`? zm-u!&23GzRqTA!>n3JVb_fG0PdZE$g@f-FT)*D!p3$b@w*gj)FKc{5|{OcZkmXPuA z!A`0E9&&gQ-btCcMGMT-6nsbVMa`~`94IWpceFxl zv)YzFJ~_Y|j(%c-d`zQD@X`1jb*W8wd`n1ma!z(3SLCc*Pi?UC$PvjeifuHaPvYx^ zt^d(V{o`5;u5ylsnpe~hheG0Byb6YReeJJ_q}#5|l4X4eDm z)VC!)wjaY~b>pQA%5K$~G*#`y-(Tmx=ew?H!8X_5@~8CewVfXF8p+1tYez2*UlgZ7qYqdQEaAx{mH32v0mUV?_Q=k?Zijs;mfDGBD%`<&p4^hEa>C} z^Fnx$UU>&J8vFTYTaJ=XB?zBWOh=Px9Q!V~hu&-&Iw1P0I$nfdbDD2|VM8ypv>ZBm zW^HiO5_4Q{FT6NnW~LCU-X?v4=85X~Ds<@9p*bS@p25CBb?N)o`1dK4la5mKs&kTi zrlv5rjN*m-zX7;=sJAzevNa5C$d_DWuCcQ5kig^F6SWDCvH9_Jw`4f)>_XNTZ-W;_ z>;CoQZ9}8^_gC}Hr3KK3)+_8~vg2z%7=e%Iyl3RawvlH(OJ@gcv5xn0f7ClU8bel8dBr=x-v{xj{pV!U zSd)m$r=UCbbqBrRaF8dq)|bCFZF2N5^@-l|SW8divo5)EH+y^8b6*twQN}Et)nIYf z#tu(rnTvPH*V1>{GzxuY!cWlCSFfk7Do;*X4>%Wmhk>UPUSW@20=`}P6yAk1(df6q z-(R!;4%_b{UpV?zSKn*|9)nlL?IHpE+b-*-{pevwK7EjDL0m?>2>N`?@vF1XgNL_P zPmTLJEy1kYl|La@UKiOH3+cE-IP>=SV7G-XYm;YKq9fjN@&!WeD@~nE?kE6>%@H*f% zZFx>~N50a)i`mAknaJ)77ewDO;bz9MDv(0uHT$No~sJtqnePr>YkZ* z_b-qI*36sTAl-myTIZ&d&j#8&m)tHHt?SUApZG*@we8!-IRbm9=2hpjcK0zJ1Z$?f zkA8V?z^KV*o^6A6-lq&c8H@e1oqP$I6OawHR=?sY9H36oyL|S}pm*p}zL}?3$9v`* zQS9K*gYpzgw#g2hN4$~z&(vSxbOHD{K<>!*nS)LEYqD>!?F0Gng~abf!)3LGa*U*cx2DR1E2Y)8|O8T&ev&Gyw_z?H*{_P*Y+4uSX7OsP1 z4*hI2J~=1M+Ul=zY$45*iExgn#XfH<_u6+}!#CJq%3NJEv>sjctB>a;gZ>l={Xe&v&S z2lZTSDF3v+Q(4c>O}U~KY|bIoo{rSRd@9*;4Gg!cwZ&?{a#=%F8@Blzu0QInd6F^nt^LNPh2S<+I@dnu-sNw$|G|hkGTbl4KJ$3@P4v%Q z(kFBo|D<79hTBpjM!n0ZUZeI9bvOD(8(KlLGSDK{cZ%mJ5ImQzqEbG)t%GQ+0}W1ZT|)g z^vkw2xP2Df+K*`;)`i?5U-j+KL2ttMit{%(H^7FMI0kzTz?c7)Ij|7jsePWM*IU@d zT#sQ>KeCLqem(OZUB_eKQ*-1G3GHp<-jO|>{m*FmRId0f_=K(p^IoDKwU7&(yq9U| zF5R9j7s{|aD7GKU5WNV_N0~>Q=XDI7Z-sOtJp<$+!N0VHYhisPcQLeBOBR2JkI#D?ut*^=VQ#l_dxL*Kfteg4QqHKzYnQjvPT$Rh)mF7ft&C-&i}{* z9h$q!>5d##Gu7MJhOD+vAvW%_e z#DX5O3Ks^BA|poVM~uAb$=77h+<1vWT5{!>GS)=cFK_0TVPrI@{52mA?0z5nr(#2F 
zn-+UJA7y1PLiSywGa~f-M#e7`kFy+HOQy-kHZf1XlM{>i-EjxM2l)N1{odhuV6flR z-|^Xazk~QNbTKX(w^C>reU=TouGrRdwC>@Tp5yZSI^?7!hH@v>bzDL}mV$HE&tmyy zlvJL(g8wVbxDP|g7qeE>^M8P{DU+PI_>xaeTp{^V_$*K?{b_$q4>(?^GVp4RT}eU8 zic+4%PVY_kn`ZS9$}iXZDRt#Ls`85Rzj4lDaswRVy!zc?)70Lq zI4FHAFlvtII_3=ACY(bv;@m~q8CaJCT9*Eo<9tP<9)2H9d~} z%=@L>i%yhdyBi$3Wx)SnnIQL!rM)gDuGI#ABmLZ*etBDuGk*J7CpxkDWWe*j!M*H1 zSJD=3?gC#m$gjnredIYuw^*{Tuow6%c2RCIHet=B_o?HNggUCZ7mu9DyKLq}9ezM} zQpX+OMB{1U9}$KJ8=P|>+*p)pChyU_Bx8}<8wZZ|Qr?5iP!G>N;vIwjH!1MofRhuM z{mQHs=8|x$816af?1)J%^1lww_|^i>0-o78`Yk>dz#AdYh2Tfe*(Scf99Tkpf3|Zg zmhxWf*Tg+TvPPm`pNmh~h%Myv&=h0VgH<7%$YEM~xO6%ttBh?%B{l1a;u_)!?@mo3fpfj~TwL#y>S2E057b)9} zLbX`{9hLD5!>65gQ_pCZ^IfVV*+#{8$wfl0)@pL(2YP8gjJ{cIhqzus`?{aN^;L61 z^?t?z8_J?3l*dl8sFij_1M_$;-F0dvx+`#(%r}>;UcM)0Irp=<7fi~t^i0~mFIia& z%WS;`uvVlsHZ#jMi#9&-Qf_pVcbZ0NnevdcU-#Y!J#)pC7)p6i5|Aodcm{zM7+_cAGNQuLcYNpyg>hJK){P-BX z?R%>6Xl?M9?Z5GEeHHz)54c(c|N6>bwwqRiZ@PEu+q{qQQ`|r^^*%%l_`gtY`P%VY z-_pB!U#ooO)bA?8*HA)?19pW&3(+SQGDpUnOasoy`j-sebJr)b8PiA#;~^Zf?lnSvPrxF4yd=InQyT@ zbn5#MKOc3>SRLMD+wu6WpmatoJThc)a#y8=$?R=L}pV9DB@clHuuN)s> zsSma_0K-vlN`Ssqd+0YbX2b1YUARZ@9s8VBJ)b_}A5N^dF-vq-W%r?K&vNFMo@2YS z;|xa5L-=RMna?EOon3!i^0@5k7b&-E03SKd=FmRQ+j{b4?9j{bQAAcD$J%C{B|aDY zSZSOMzZQQn#s*t=v3RJ;ollul{4f8m7S{e(@GBHud-z7WUg;4EIJ2p1C;m_#qwOGh z!>c}6)meyKUi<+LOm9BJh>m9Ga zJJ%VrXHqA;KGS}`KM?IhHn8uv2cjq3`}(2pZsWUPoiUraS^l0ESUw2eFZ)ysd$VEQ z_T%K!t2%p-xnxUSQW$)%$7>Xi^K^9x_ZH`0^mxC+c_vna`gk?J2=SQ<3yt&&(@ObI za5iz@KTNTR3Az2F%Fce)ss6`&go^9oQ4P)D4o5r?Xlw!=n9#{S*@86nfMq6%Jz9+&@ zvTv!Ig9;p$LZgvqCNA2*FLmvh#TP;s7I2(njxD3jj)MHB8^=JqJ=78AT5v;n53&<7 z4|Y-S^nedO7|L2uw!!q3C1Zyi9e6=+`7TxiI)0q zCM6#*$0R3De>%4y|980y;k&(*S$@Msd$iW5e$i%c8E4Qj-u+*|F2;9nF*nt>k{b&4 zXwCM+TegD}y*~+_wYJ-OU+#ZM`BRfUZGC=E+kfyp@_X>c@Bhc!nRvseJjcZwd&8Wj zKQV8iyV%Oh7mjJ*qw^pppnxf01L@QecJ!=)?H+rl_q2_BaYMN8p09g1#3 zYa$uWv%)=v`kuO6`jQUcrJwe1A=4rgth#s1;&u1l+bJH}_1J`{cQ^aqbR%8!x-g%- zSn!skJU_*nx8R0ZdqiiRwfA<$dcAEsh|}s?X+@WOa_q4%bE%th4SYX`euzIr(jLs! 
zJk-3@Ts$jI;GO@U;$-dLhLdJ+!g-uI>x!LxwflArY}pUL#lFzCmg}j>^O$4v+CtYD zlYWrjH@ErT=B>G`%{SC~7q^{j%v!*@(hiR**@|3R|6s1pj%4mGDy+XSI@4!0xHw*w z94x+Le6V=k$$L8^o~#t%@~5;_h0RmE-zxP+h4US}H+{Z_pf9swHhAsno!`cI)I#;Ey*>OI!laa)s13maN*n6Za- zKaKMis}}%&K73^ZIRaz!&kt`rEdItf3*bF^mP1*M`*&zV{de0`8_;hiwu72xU^+$q z6_v}te`hK*;=wmov>_UaP^aia^kwmli{snqcNpA;z#GrorX}EcX8(Yl*P{!2*UPCx zeH!Xd###IGEaO;0{|0_NrY**N(|s@Pt6$45+t_IT&%N+`OZ@)}F58mJnpN|9`Ud2B za^vy)3HG^Lum9SfbJ&BtYFgF%q4S>O+0k}zQgXv3dt%go8U2y0_?CCP@}ZS~1zX)- z@My4Z?A15)J;pcw8!p}xGsdT|j->VQ-PPCMwx^rtGp_&Yo;UET7%;|EN7nvqtFQFZ zhRENaGdX|^h~HGo(aHFnU4QYOcjgA$Bp=3p{h2lsJ@uu?rfR2aoYT(sgm#+ka%0+# zQh(@%ukKNu3uymR$*p{^dM?GjVSf+*lV8oNW>WR`z5&M3vd2;L{ERWAzWW)o#4$Yi z7;}U84R;KG$-O&Q#7D53&%#gH&OuzvxpDn}9+-UsTUv;D@JKVlP*ez-z8)FTmoH-AHGa~hv+)_Z=!4Dzd^d*!FVLnHFGm92E7tvJ5(?H z^mD{TMC|#JYSY)o!TFLsIA03$sh2g|o-g?Fi9g%(g=coXtjp*yG6$$X1}%Qz%_S9~GPE0pJTFFt*v@rqtxt%6r*zpHg( zIs8IwH%El&_H5?u|gHLRPcZg4H>mI0hpnG6wKBMo^ zKI5;^^8)dS_KlLFt9r&HU0DT+lWFb1eH(6K7aZoIu$dzM_=(FT1ba|WBx6i|a9GJ(CSmVYG z@gVwqwl&-3ZN$(HhY@~c!$=!<1Ecy-$G#sp7Rg6h`)zBVk%pf`vEaggv;U}kp~e_( zyT}cvy`l6A_ub!=o9szWiJ^CAJv&rV8t9e2q42Uu<1BNs?L#=5EB-JQZ-X<=5Sa9N zd}G``z6p+=OE#__ozIRR>LYaPj*Bgq@ZCs0`*Gt?nlKJJCtk3m;y-{MI`Dl@M%k;Z z_iyAh<-S3G!)qVkI>0__95lait#@&VwaJ%ybxQRZU)l2gMp}rom>YPm{h##JZx~Iv z+W&>g#moFyv=>>sn{~O_lW|b>k^A;oA>&xEKhl^%e3twhyHDYt?Il++I$X&b$B7@0 ztq>nyS3*MooZm zTFyCqpJR?MMfa{Vrt|PslW$BF|L?_)9h<+exBwmbRLuo^IJHi~A8dPM*tVCZMo0SC z$}d2Ba^63}xdhR0H$LR2FCFv~`;habWjpJ}hNN@)oO1=wyF4+sMIJEA4l|XjV!u8( zVSiK81$~b6F|fxq!LDd!I_EoDsourFFiUmW@W-DEH+77cbwRKNT}y8eS;##T@@;b5 z$>vdu4^W-1w&vC@Q9sn+WsW{x7&mt>;E~-0r}F^O))Y`;`W^g`;fu9IWxh9<=^PbNSbQn8Q-Rl z^k4WmeQoC>ij$e3d{K5Dj(&K=2k?dc$YvjLc1=5T*<#+daZkDEA1MEE{;lMo-*p4O z63aLy9d-e++45Ka5W1OEmA&=EG%I@I9iv71_PL@vZ!UQOUutBYMwjFp$3m=^es6AX z&$&iLrzaO4I*Ie!bCvhH!pU*r`0P4Qumv*rL)|Zcr)#`mYW#|ofw&Y#pwJFL7T-@Mmk>83xUcq}9qu#hBFAMMAA-xR>uM~PX4<~l!`pGrv31*d z4qVHI=;FSBZ~t0(7OVSCp%>qc-5>tjTgW%z66HZj99!~&$NTaN+Stol;)Pc^eagab zJj=$v^_xE;`26tvvZ9kwom*5?eF_& zU{)&qvthQs$+gc0JQH50d>7@_j?bHMu=l!POU#pdQ2h5G?JV);*uJ%QF(07A=snYn z=&!zI^l3gIpWr(`sTaqecaf_rK7bDt1GAnThF`2s_~x;MZ&c?kyzleov|dDceJ>i{ zO^%q}>x>rKIP@WY!!r&KJn<|1MOH)K(Y`5ei@M4?7R#{SS9Gk~n zlw4WDT=CaCv1Wxvu(%3d`zGZsrab4Mbr$%H^s1`ci(1hik~<(!cwVq5Ty;?qwzb*5 zc=;KW)i)|z$T!O=TfjHSmdopb-^Vv94{z@PmgsI^a^-=4e?oTWkGDH*S0AuHe=whO zJ+Y5}#>S^n!&=j-7{7k(Xto{7drsh#cWeNCd&N@x+~o6SAcOghjOq+@G8x-8<=zE9 zj)1G2RCE?AgKYuUqe9?!^WjO3D<#hC2y%?_=j$xi-w^Lhd~{$n-)aq956xJ$wVQl) z?hz*^-Uf2qZ9_-)ae49HK#o$J&-?Ua6+CzYwk^pBCFsf2mlyCyQC_?V&o`4JNNcWq zUS3F1e*Q_ESFODK4qW-bA=n-Owtbvyq__$3V$LAyaQouKlPPb~xdGX!4zPwwHo68q zc(Lb;bLMc}>&dQu7P& zEC+AG#crOBmJ_h!B|Cp_IX)k|$o)1fXJ8$R7Jj^-kt*_B;jCd>Oe9}n)=%H1%Rd|JcUSl(|9{a*!alz`7{N6@8A2pG`TteKi zo;@yp53;; zr084cjml;|Q0rqEX1*@QH+bi_#$nnFLwLrqD>>JJ`1E>befpptL3 z&YhqS&r_GXZ)xCO`iOkmK#$SC7#@KxrXQW&F7>q#eUa8xn9r^r{x4vU0AB69Qm5gy z1!m?!^xO717s^9XjV!_%*1wN-9#4Q-Ie_Rtq1mh-Pbm(Y! 
z|3=g87`q+0XNVsTjo&f+mBlZRS@8MQ`Xm3n%whSCW&?W_a!e00hx#@03=idb@^dD+ zY3Q?xyYVPRTXe>umW*e)GmGo!udQPndE(GpivFoJjT<94mN-o0q{|Kcz?5 z#5ZTmkE~$PjLcxs|8{;bPWJlI&Rp+eJjxTsJ(YX$Wz7|QOYC)FiDC7Uv$EnpS<|Gu zkuI0(D0(Mn-4T6Bcc%45boF-$^`Gc;>X$!JMZA8kJF4GZm$G>0u30R~(9gUms54k_x{|%$XnuLz~i^TDggU-Lt>?7W--+#I(q8N)rzV!4j z99i4O|I&hJE&1D)PmDA||LskPguZrJKnAp^K8+govhkY41LaD(u z$Q-i|n!#f13!G0@H1`Y4mtOpcY9%X-ZE9RZxv+0CzRq6TM@&9UiWLl}o|o>gcb?tO zGuuy#^FjRky*1s$7k-ClF@BQUEpuF10s9T~){ADCDami)n~C3B=GAGz;#7Qv%7~$e z^v7dA@z>b)VCmgY;S;pi99O;0zK-qf$@^~O?@aR`eaw8C{1743tXO>rR3N zO7NGLoVJuPbJv8Cd4D;BJy5!OT)JhL$?%Fv(ow;`D;{Gkw4N}>%duT-u0-ZuL)|WK zoyEQM#gY*=QMNK(Ml^Pl=#cN!rW?=D^_O@5V(>Sz{TG;nsv951&6W7pjr`6j@9Kg% z$JN6*eRf%F%!EW z(g~Q1_p8r2zKUJm@vZ-kf8X1weUDE^hnd7$A^u5hWJd`7g|YOCsQH?4=_h!$6knri z%D&nkd{6!R4CVik=krzmqk)e1p|7`34?NMHVq7mD*#lhFUOQ{~#ED;Krj9ziilr@2sQ|?aw#+1%v&*l5_l(kdVl-=aN;kKLomHg0~WLo$mzG_;% zva?xtcXdqk88-q~YZ~=`pYz!+`@64Cxx{npkA*kcO530NTIc1Kabt>^dwJT#+o!be z{%YsibBr5r$7k_guA9tTrW~lP>xhA~f{U+yY$Ej>=wH=2!<qz4XGHHvUNI$o%~$_o|6e*Yn)!EkO!?v%>biE3{~E3}{2t6*Ib{{s$z>J(x%BJg zylebfeAD^a>-;B^j2l-a-!)~+tvC2j1@4%VyX0E`_3G=goBZYKYuU|y@koE3_eIgt zweP0w({){+$9Q+(ruJENpDOut#WnoxE}8vyl)FyVP!BJ==3VmVl25If{Z8=*U;DI? zG$~y$SXW;0Vsh7}@(E`0!r0pJi^rPT>9H}%E4G1uzxhQwAFJQ2tl0+b_|31^_$oOg zIAcSu_{aD>Cyrr!P8FKTu?MA5cpn3jayI+X6`9eI)oQwoQ_n(pFZB0l0Jbm zDaRV;5t}(VUAUKzdO?zL{S4FEXc?J{mqY7K=%}r{Z5`mN5uCIN7b!+P|2H(gs{6XS z4sg?|{|EGcu&zVp?Em>TC2#0`ig91Zt2}ex7g8=@j%iKl+SCy;Q=YM|oNTYx1C=E< zza0yl6M%C(aHayM-BSN zVB5(5Rd(5pjRS%Wo2LuAU@DBaUpX+q>*4$!pY!`dX~8zhuKCo%nw+Ls^95-}+m(lq zHSzO?<~!~|euiK7+jbfBK#r{=chLS}$9=LVmwe~E%O3VFyaGEQbpd;Mcq%p+>auMy z$l{4>lHg-)wdH=b+m4*%H_YBT=7zxSvjR_W~a6RBr40N!#1Q{fdUv{^Td|DySptZnH zC3yb6VEY^ED)bJot7XDaU3t{?4eE0D5sp4HyEPwPAh>TKw}*7C*N-Rf?EgT=`aQ|7 zqv%-w%)VE9LFAE9b*vS>kvdjj0#?EF_rMgy-(d)5V$so|xiHV>J-Wrf9DIh_|C>6| zTjKDJ)QSEIcr}kYEyue%tg_j_@3^of>_*O6+zo=yKb zsQW}_JDcva^l@^1kI;J#(|Il<$2xQEd&@dDl3!8Ry9Rme{o{;Bo58&$ zABFu+j?FyN|B`r>gQqjkBXG)^QPKHz)YAow(&s3Dq|RZG9^wf43n%Xi_C$Duc+)2G z3EST$#D^=N{eF0h>KGlCl9xjN&3?P^363ss;OLspgvZsPj64%X{kFc5v^W`gAjH|; zej{f^8L=v1a=7`8>=nzo4-v!TH?rc-4em4J&r`Wiv!7c@$t#fgN0pZ(b61a(U;ah73d%DXjX+vXxy(blw}nqwHLm%HQo|Frie@KF_MyLg>W0-XrbENX)&4YD*#AS_}Ohz$X3 zFhJPjN_Q6MK$a#81O{h@` z>aFfh3pn>b^WE>e_fF+^@|>sMTHkuBPMxaL=g_rVV2**b@5Oho@37)}OZx(9KhM}% zm`4aB?sLU3nvbZtU~@>fVR%O1!J{D~h7Hq1gjeelF&lFw%s*COJ!e4!bQY0L$%L%EEY&!B$#d}ag6t?pm@g&6g@aqzkrjZshIw+ysq^R6(D-!5$9aV^9 zwS9Z^L_57JOz^jl$Jn#~aowNhMC32vA3r`H{zbZfKg{XK-w6MN@wvh>USOVs7mXpU z3wt~RzDYR73q$_?J9WSFd(d@M?QaCG^`H3D@82iiIz*f}5rRD~FU=ba`_>pm0^TKf z!fJC|h-(|Z*G1p%!hSS#K2jsx=$tfdYcOn&`rCrEF^ zalf~T;!zsd9N5&{*>-~7gBXK4w+^wLIDQe{>)qiSS@Jjh;)!~_eT`z)Q}i1*W%n2_ zF+K)-M@oP1k-T50chl3FHpa{ybe~S&I{PUk6!Wbx?09@3jbpl3iIV%@%$>< z?+frv;Px>59$FJ)X5g8}m(kwjf9wL~kLP@Jf1&#?@cCcJ{wCdjdtVj)0`@oS{=Izu zcyeWQn)@pc1+>lPU9 z0zF&BWlkFh=&<60Mx<}xC6>CBZmf(KXfpK_==2+Pb zn_hzZReHCXUIwh&g@=hT^!&*%*anTmGlA63R>KSUH8Ky&+p-a?h7E(!}e#J=#SscBCHF15&A@U`uROl;YVTG47L~IyNefexl@Ku zb#i>-2s?xMlL$L^u6TjQAX<~OcI`LTg>4)$McgOv>E;OtsZTL{)d!g1KUZ8uQxkGIyo-|`Z*V*1S(xHXlQH6WqaE#xnTKMbXh(~LDG?r0% z5qQQ1>%x`81JYCZsPWD?(YAwr(-l!~-7sUQZA~23%dJ|(2I0i>eOWa!hBx9Eg=6AuKLcyShhr2&)nLQ@LGJ8ghaW>W@zCc+Q%;;YK0nOFy zG3MFfI3G&iO*TYdV#OTF(bc|THDq1dZn(Elvz@SqUoaoP+rQT<(>i-}gyrm^oR){m zi1@U=lizj5hE&8$!}C3bVJ}Uje4{R_%`hS?)(f^H-fSzK<37lUI1zY`oW>z5mti={ zupE8`lZR~(lf}M8kd5o2&F+kPwTb#k;ix0*?OsZ^=f-gvlwKB&5r{vB($LJ8P`#;a zGf`GMFj9-S<18FT%BMfx!M*Z=`7;OY!TT1v-4UmCxr3g$r1xXoif_|=jW%3?BM!$~ zxKAvgGNMe>hLt#nYi!by$hA&%-esaYrHUEMOe)WAt|#3%cT-(%S#^hcSli8-}u8iL`FV+VD_3 z(|s1oVAr&{*u!D@S-y6q@~M3HAYW?JX5>xvQ2mYUMuYvdn*RN^lZAZ#|JY74;{Cg9 
zXD-rm$^N<@?>6a0wnP1c?B{5R`I51)E7ecPR;WFwExK4^|*+F2x8HTpSvkKd9hd=4Pf!mwxVmO|ijl{hm^nahDYr%#QkatC&dJ=ugias?1 z`9?i#IopK(M13Txi*|MwjFF=^MvX-NXH#E^0H=3*?>he5f-T(FXq@zPy<>wJ=}|pa zpuf4fznM_}9v412doKDN#kZs1QN5^6(8qS_>w)E`eoyw1iatLaeQr7S#Fwn^zrvwC z@i2`u@;V`(pRx-1ZnbPYTSII5>U!W5;vE$Z;~MtDZ985Um{V#IE7aVd(xPza@AdvQ z0j<)5cg;R|leoun@`(m-EZ%32XU&OQ@k|8LIsPQ((?cO690tFmVVF}QeSO@-9wYhd zVS8iD#Cs;_-dp)?2^D5ze;Jmpy~;m4z`q$fRsN|V{_yJ&{+8GaBBcGwS{J`Cv|>Fl^2_aCQC2hXcfnDJ*w$BVr|E9P}xypP0#eM8Lu z#;Nz|*=au;?+hLq@IJj^_}#}0{MN(TyI&9cVZ00S3~5t(Kb%3X>iN|f!r-2BUZ*q2 zy?D-w>UiSg)9axb*U=x&AooE2^%>;(kbimxIbEkeI)j{Sy#Vt#2g1F5hHxI(M#>rN z!U0|EIzxQbA1jehgs96pRi709g)Ryky5y!aUdx!lcr)XpjCG9r8Cw}oGMWuCy^)M@ zjMp>n?Ih*-%r`PV&REU(9Ag9HdyLJD?TjWoU{7u&<8;OZ#^sFb81G~(VytF-nX!?v zh4DK^W2nrh52KawGDbV&BF03<9LAd&3mG3}+|Ag?_%Y*Gj6X6O!#ID&VT|J$XEI*R zn8vt@@kYjtjAe|^F}}n24Wp6U$;>#6aU$aa#^sD_88anVe!-nw6Oz7&MFax7{K2VdQXPgXCEM(jDFaqQ_HmGxKnk zTbZ*uM060hGspUd?h=CHCo{)=nC{#`^86rP$Xu1Lh`Gw&%N+A<-PHxff0;Rp#ct-9 zW9hDuxk|r@xw6HB%w_Mx!wW&YB`ADrko>D4-WJ5$nJayV-YNC3@-s45<>?Y6HwE!t zLE+6oyl)V_8=Y?#N&hFF9_lZ zLHybv?g-+^LEII@)0iuLxr2C4Q24wczAlL8GgtkqFo=7YEBo{^SN2lJ9P1ytYYdV% zGw;FjcIJJVo9~kLhW8umE}FTLCj`m!g5*WaFXH(1%=<8JW^Q3_+$i(w$K1+%F!Olk zD!!X}H}Z)a{{xdjV6)b|H4w==gg&tpD}xtIC*%o~`i@wkb3cb2y? zSL0{!c!{sn==rPgX67pWXy(ek;+ZS`xS99n{DRXrvOTKtEqHti?%%=Vfi)<8u>BWu z{^xN1OO_-$vs03l`oxkYNy#ZmX+XC#JIRTq0_m14$<9dzR~}22mEu<6|rQbU^!4aOL9tDN=}NH1R8^<^(IafqCd_j!J|KpYp#>wFagPO zqP)Z+5svI+QygAr*R zPKH4~97iOMQ8)(Sh(V-@NMJC|*WtJxhhx=JeM&O~amhJWB?fWQ=n&z^S>sO0T&jWz zM_MKXmh2p7PHK{6S*|lHS(iCTX&KA>;Z{+5Lo4vlr3PY&LpGOG7a|0Qa3rTJb>^ng z>@Gt^wIrowX6I(5P!bNZ1!vaj<8%6f(NlAz&h_}d{C%khilGPHoGh~Vr%Q$Om70~4 z3r!@aX1mjzNh#?m89A18r(4xC#bx<}qDZ~_!})b(@fXSAm!Gjnj-*WYn$szb^p=~P zW64U%$<4~po5?ZFE*$d~ESRB}0&>}|KC9@+Pn|H?zp`uDASsFpM`n6Ps)cMQJC&@$ z;a8IEDS?ej#5oiE%!RXD2m@AUWw z>38VwIWF9sB?okSp~f@IfB_aH@8?OGOPs5nsXUDO{3lMLQ4C|}0*ot`sVVX{%{M#< z%M=~Q&Ro)by9Qpk-j>=@jq3$~<9pWlJA#rk@CHOc|3Qztj ze}xo}{8c(Q^~I-i4fn7C1{;5zavk0R`Mc753fTibo|8O z+a=axlZ(c_HbyRvi05?M_sDSe*CjUWlxTWUqIaT1m44&blB;wSD!Jl~LFINcSK(T@ zo^_1Mf8@0?g^@{_=|W#j9GU3M5hIh*lXU^r_4AzF_6-~3J2w#hJ(^c$6>+Y#?co?77i+FJ`O7Ddd3@p7h^tSA!8Gx6BN!YX8=`Ci$p(jiX8N~ z97xiEkP{FjG-^vv;>B3>fmW$Ed6j7vKDXX2*?*< z(!n34M`>i~t-7p3d{v@9j=u)lNUz^QZj$eINrY7wM1{t3Hy9+N?fBnNFP^#Z5vQ)Dk(+ zNF-7ty^x)xLo>O$j;W84ZFX#Fe_hFlQbZnN1^3sEB}_&-Wbv6|8Ei5WF;1OeY z2C~LfX3}QIUgU2RveZ=wL8HRhfS$TS*Iowxr*-hBlB;VumFLv@BVRW(m5H#FLL@Xv zIy=>&~Sotiy)_WT+8bGqW>z@L`KQfA{8azSQX?2M@trw5M7Zg+fUT58f7l!hlw z3sSPuQ!}uRmYFdt1$&Um6jnKpM_x}9mN8f8Wv*~5bA`1*^73wt88KV-#aFqn**g=w?*u=QA&4^f1;k zHZV3ZwlRiomgVZgXl9IJOki{~&d<#dvobTp0*ot*u{W^5l`E!arHc8O^h1{H#B&Fb z>qzZ^aM?rN=!|WLn7i}ZGH{K%hFGfKH?|2fJh^i=I=;D9~6;}{*(mL%FX5l zJEWPDlAf57g>dAMekL*G5$H|%`S|2nLd4@BH*40Y6c}z$0x*m{rZCV`V|@a0jEMP#(x?h7hpz|3!JWB(*?rQRgsXcOEhbi>B{(i zF4M{Og^=mdbi${zgHk$@sq}pgiP5e@J zSGwiUU?j8<&wQ;u(~d>?rCR~LM~W-;`(W~s@&&NN|2Wcs<#&Hpq5gg^`@N4Hv#2xu z-Ha82;J=RH*JEu#{b}N#`wL9rM&bW6U`^ql`BU*N|IZsi{`wnly7`t{Z!5U{jtzI- zwejx4d+xn&)BO)TSoF}tk39O=<4<^sOG?YiD>i$#R90=>R$Wv3Hg*bhJc)PDTuUw%Dt@;9LwLPEnjbvA}aoYkf4+1*Utd-Uve z&bjBAd-plN?*$k3v-BTu(ZE52hggRW8$Kd(E>NmtHmjdr+6#oQZe< zVd*kg>hcw7=^2^sm08(2xvN&^tyz1`wbxz0#BqB0pVr;*=fC`S*Z;pe|9@Hklc&T^ zoi_an`;3`a#?6{NCw}g{`3n{3KPiq%}%% zX6AI?NY26>%a*#cG7n=O&0Ot6*_hM)DLFfHTKgpz&pcdGk-(hRUdcI_(^@Gx7jv~% z>Sj)BtmN{TpDn4#XRe+DD_~Cdz~l;<)7mV#BIZ3L6&~h2nR}V{VqVK!JttJhoYsiR z)ib9xa&isKdrK-BnXBixnwYESq?(!2+BCUV=GdRnT^sWYnYS}n&&i1wWP4gzZe-q{ zxrzAz=JnfT_=}jESw4`th4~=nR_245M=>A5Jes+>v9>X%HE(iu=EEfw@ytgsPhcL& z+`*ilEh6V)K3-DcW_}6tJm%5N^O;}Dynwmf)rUPWzntYo%x%m)%qKJVGLL0m%X}*H 
zI_A@v*E7F@c?0tq%o~}{WZuO5O6JYX)jY42`5czFF<0}#cIJy%E_TZHRr5e2^Q&2I zVy#0$)=PKl ze)cTpMJ(^a+{?Tx^E&2dGjCwtjd>Gu6Z2N)-I?ce`Fk+;Fvq)Tb=S_}@zHSI8DEt8 zK8LxP`MJ!k%+1WBnfGRHXWoZ-0`v2kyO{T7p2z$G<^{|zWM0G^zYo`)m$`*`9rOOo z8<-DZ-o*SO=B>;JGH+)-h`F&|)_*W_GxH(Lt;~lqk7hoMxt;lN<_XM4Fn2MJWS-A_ z6!Svn^dc5=9_Hhi*D}ADc|G&-%o~|s!n~PzH1js*^f7PUy(H^<8FLf!3Cu0bW0*%V zznr;^xs7=|^U2H|%ww6mnNMY2zBIeVXdzoLsypH({<_*keGH+sjCG%G1am?G9 z&t`6XS=N6Jb2IaJ=2qtOm`5{T#N5t&G4llGS2K4pPhpgzg^Fr>=otZbVJe+wG^9bgx%)2meXKrF{+$HtVlew9BFXmR}=Q59Gj*qnJ&d&S- z<__i~n7f%zU|zsHhItY5Sms{lE11_a*LXtJ$UKyJGxN^O+nD2b*V65l^}~A#bZ27T zg}H@!ALdcaCos1&U%@z?USAZA-7atoyB~g!^>540jUf}`{U$jU6dTHf0E1KbTgQ{nY%eW zJvBj&R-nnza}(sU`TjUttYl8B*aER!j4~Xppp#2QO~}!zF}ZACePp@P^i^{1+hx2!H zI?Fg8+MOpyE9B%>=(_;)e3a6Ml&7=*GOi!#c?H7La|slZo*|H$qEu*Ko}PV}kM(%z z$UY*)6rt{a$W9`~G~c>5*+-;^73zM3>?9Ja@9O@C?1lI=PEYRYU*TKlCVPp*u7bLs zBD*0zkHeGwL_*5u4*lhV+p)4P9oZAf=leK4H8BJGD$XrJoILlU(V8>@yOjD2uu z**kHiKeBtuPt}j?KN6wU{VeqZsvmE|A@o|EUyc89U^#4m)IS34hWbe)VySvjxJX!$ zvb#K8FMhsOkMHNyzXHQkKZ}Hws(Q(%kf!_EjrtwcLzO#SujedZJ?Y7vK)aC7$<6ZV zk@_j^&nP>m{z_c+3+lJTRsPg}sa=BgK>az;eiHS1`^!cBJg}V9-vc@I`;J`p4^=N3 z2Lk2kdi(p+r|}_Bo*k4PjTh7|D!nAVo>#!4q@!^pupQ-i;!jbID`at3=@I1kLMciM zTZ#IRW~Ok<$fuzE?V7H)(`=uf<#;qh%;o&$coOUDFLIyFUp~4HQEmwwUXCYIeC+65nx}C^+29`sX z-(OEvu6Wk4ELWVb-^g;!_mxwYD=wfMWN-d_Wx1~Km0OidwU-=krv#KU9dihO&z1R4 z_1TZif0l0?Q2G1EL(1Q8Z!-T_-*_kU_m`KR{Uqz1jQ@e-TQ;t|R1Vd$vK+H~<&^rL z;j=@TPrOeq^O?o1OXD`#*{SoP5kOg}%x8hmeq=uW{v`96>Z^ZXx*gj!T^|WLmVXX< zl5{fNnE{;iu*g>*sz3QC%aG-|$|slcV}0X@(zCj*QhaKOj`7tfLh~zwXhQ}}q_IHKyKuJbbe$pQ5z z?JV^53n`xx6hA&lKG$a-GJQ2_%K0`~o4RgGxjn!RD8JbO*L~vgzWV#;`SMCnb`j{* zy0IGf-MlV6f&;nVllK{NwvA^Lc^^Zik@)8bjXMPX!1m1q?%->+%zb`h`h11C znfZ3+R_5<9k7nM;+|FFhmlK%Rv)sk}ZRQTHUpVtTmhWR;!2BcTMa&N{_cDK-c^&g- zm^UzgiFp(AcbK;_FJs=$d>eD)2T~u0n46j3$K1-inRztxzcIHnf0KCv^TW(t%s*tF z$NXdF1CW89_iiY>bG2h5?7l$9typHAR%o~{B#Jruu ztLJB$Sbi(ZTUkCqahBiAypZKlDm|7z#@zUk)Sq0v!*e*yb6IX>zL2^1c^SS3^Jtbo zz}(J!0rLdr4>GUi_&u4sSbi7t`WIySc5BFl{*OMVM;GxN>N zt;|<3k7oWT^JdPkH*-77uVHTC{01;jV0kig7xSIW^O)~qUckJTc@gtE=3eH1XI{tr zHRi2czCO$wSbi;YwXT1Jc@xWDX5Pm91?I*>QePD+KJ$lEc+T&9=Fu#Fn7N(#bDF6M>I^OzSiFJS%@^CISz%)QK~Gp}RrW-ixf>6f_D z$@^U=?&AY_B35|=c{1;MsQc{{-knlBm3Ma(U(UNKim%|^6UEc8+8>y{{}%_cJhbK! 
zD9^+yL?B-oB&XHVKsl{;26DC9t@2+JP+wa0l&gYC0qcR%NB&QPT%|z!QjsXVN?-27 zsNFi+x1#k2`n|1ma$PDBDbQ~~{pra48d@EYPVUzv`SeU>iG-Gwp5*>bGFH2#lj~*j zH!tKPx&L<_lGFdd@boL}j$B;_)p~F`?>fo-pk=<*2f1$J#453Lk|+AgE4kXORO>=X zzVb`Czdn+u`pPHya$h@3o)MJ3+h>e_O zS~rj!@^t)mI8dJ9YdrKJvFll!6Mc>{fDG9S=-D*JsyKzA7L+T5qR!f2i=Z|3RUollxJrK7GplL$&Kn z`_8mqr*>QAJ~ZiDr6<=}{r01{8iypO|0+LPhfVX@qm-v(zg#-GAL1{c+^_NXOSymP z{~eUvhYFNa`qXcwlk3}RSDW^Wss3vBoH+GYr_X-nexTaTmiy~8E+~D;{SJSBm0az< z%l-FAtX!*cTkfMR^XW_Od#&*G54peOFTdLVQ9i^YF?uNd$$hdFzHwacFRER2+Fz&r zN448d``5Hj=kJelpHS_plbrUSRBps+9Hjr!$^ETBx$JL#Iq`I#{mFem`Y)Z_A68Ev z$mbCP?MLnxsizc3PWcChr}X{ff!xpYmq+e5tEU?1Jtk!D^ou&_Bv(&6-~+b2U#3P3 z#Z`}2T=ht~U#*^!kk3iTU(w6-<$j;PJaRwB-+v_c*GF>tuj11_sCwE$?Jv`tACz3~ zBd8Q;U7xN=Dh0Jq5-6vAlR!CXRFz2WEBHC>r_);xRDP=e(A!MtnFcA5`y9W&9!mdg z=>1gTrGMb{Q|{}j+^N1~ud1}fY2TOLdZ5ZfWuOpp1eE)=etpX~h6Hlj|5s0`(DNKr zd%yk2=K<)ybaMY#J^ez@iBNmcuQ8>Q&kMMH*KfI>DE|lQC(u8zzH)zm6-ptU+=us< zNA4TYcp^s_KbJj7aTQ;3m#;jM(;FV7lRVouK1iP9yMD@je}DcQO^BAW78rD5N{YPx zHGED=ceXFS&Y9l$=!hq}5Ow~rqw2-7Yj=G+G)d0$lb<;;E^PPy+m_yY{G17CQ)L{} z`-Lf+ModXOpiLX_@OguW%%OW$o8kV>FWtUqVBgI@zp^a7A^TwZwlA|Zity5#$wNw# zUN@KhW6+=Wz2W}Hp%wjNzIbejoM^XXoZR)s@-ELW9nA%tT<{jUk_2%xIhn)50x?P(FZJW`9GVSxx2MeCq`=axzgH=`6 zzSd=G*bmFjeSY7(nrD9*de_~KO~WXE(Z9<;iW6h#1Ph8Py{8Lw6*SzFlenjQT zHrx9fy^Z;OR$PROQ*w0Nmqw|r zAGzz7V~qpm#ap{Q`~7!47x(Nn<=_q1UHaZby{~HT8-1^Muh-MJ?pR&E)_L;T9?P9r z$!zIe!Ldse>p{B2WHZ_qN1n;u)#XH9O|-9j9GYRAOff*bEU_wqMZT(a%pGTWR_F0VSC zxq8;%&uSvb%1omdR)z0R=u+|b!Olgu-PCvaOUG`@UDY~u#9LQ*CO;l#+!NF3IZtcD zTc0ni|M~D!Q?-B8{@F4xdr;QC^~dX4s>9CdY)hQ5cFuKWZ;qY($(;1`ZvDR-yXezJ zJzpC1;DIl#H-GuS+p8zcxUf31-xZeVZ4c~w^XEH0eBR!C(xA1GPW#%L8iyy3t zI9aXzsba*;)caq&aA@D%uT}1Uw`tXD==5}6tg@V4gWad+!YE1CZD`v=cv#{=8YOAFh0*f8XQb>!;0J-c)&DUEE>g zqvem}ih_wPa!t>6?>nY%Lw9B+o4dR;{vO*rtWJo1ysoTeQufQ=ON?D^o1?W6xTWpdo{_tL^PG}LKfpRD`(!Fju#yAms#KDh1UFUHn6 zU2U6QczAEml#%#amq6cU)?@p(t*P(i$;C^Zg|q%tdDwo zYo=d4?7cS+8it+xX!@IX4eU8&+y0B5?ftjV-NW~GdiM1%pSsHxG5KgZs_&tizj>j`de7HHA%}(jDL6Os`$e@5<5X}&e~;tBDDSWhr=2t zWcT>+@{v!Je)8B;&QA^|4SPOx@TN!JbvqvzbmZk7xo7q2KjMOZj^#b;N+#^TJ^O}l z9fuCQ{_5~Ushuvlt#07go$g!uWyXx&nIGPDU+#@L*N=ZVqinU?mn(Bcc zpG}RFJo%-E3*LQbMC|*0KF=>1IqAn0lh(|*F?P%6Yi53SV!*@h_nQCl*6U#h%I*7~)_P(>LZc0Yd`-}Hp^UTFZPxef6&fVST!{_dQ z=CRq|7RP^n{NT#JBy}GC#Hf!S8$kI9)9o)@KXK6|tB*vN{(aJ%$M>%v@%T?=IjqneRl&PkU%EQx&Am0ZxG(Sb*xTumtzFDFoR_&N_4>mjHYVFI zExx~dbGLB`6VKbe=|}sJ7Zwj*H}ZoGxt_xO2|fS%VPv8Gt@J;KwhVkIUe8ngGoUF$ zFb@h17g}gnp@sDjS|_tGbh=O&Iu8;CW26W%UMxbwV?;=VU4%x=6QO5aCqlcVim)!Z zBCP9;BJAu!5!USq(WzUt=w#X{I(2_bbnf0PI`{ZabnY3Z8GD*F<2jL<@!VJ~{M@Uw z@blbSxEbFCHhZ*)-aE92K5uIg=YOM}b$+u;h;-%+SP1vS% z6ZsDm;X3+5n0Wo+2~m)bzKGrcML3sU0R^PDKS_U{r$H*6xBhN@TdL^BCv;56*{ow! zM2(IICO@s?{K6)p_S3Z|Z2?@fKnIz|uxte(kb-I-fjrzs^5t)zKDL@wV>2!TFBvzpc-^ zI=-+^N6~QWd%FCy^i@`J^BH0+fP$JB+Q@nrH=JK z-F;B^KRMX^}HcuJ(nVv7@t=xy-uUVJhZ+uL{GDFcz*{+z! 
[remainder of the preceding file's base85-encoded binary delta omitted]

diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli.exe b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli.exe
deleted file mode 100644
index b1487b7819e7286577a043c7726fbe0ca1543083..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 65536
[65536 bytes of base85-encoded binary patch data for the deleted cli.exe omitted]
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/__init__.py
deleted file mode 100755
index b966dce..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from distutils.command.bdist import bdist
-import sys
-
-if 'egg' not in bdist.format_commands:
-    bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
-    bdist.format_commands.append('egg')
-
-del bdist, sys
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/alias.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/alias.py
deleted file mode 100755
index 452a924..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/alias.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from distutils.errors import DistutilsOptionError
-
-from setuptools.command.setopt import edit_config, option_base, config_file
-
-
-def shquote(arg):
-    """Quote an argument for later parsing by shlex.split()"""
-    for c in '"', "'", "\\", "#":
-        if c in arg:
-            return repr(arg)
-    if arg.split() != [arg]:
-        return repr(arg)
-    return arg
-
-
-class alias(option_base):
-    """Define a shortcut that invokes one or more commands"""
-
-    description = "define a shortcut to invoke one or more commands"
-    command_consumes_arguments = True
-
-    user_options = [
-        ('remove', 'r', 'remove (unset) the alias'),
-    ] + option_base.user_options
-
-    boolean_options = option_base.boolean_options + ['remove']
-
-    def initialize_options(self):
-        option_base.initialize_options(self)
-        self.args = None
-        self.remove = None
-
-    def finalize_options(self):
-        option_base.finalize_options(self)
-        if self.remove and len(self.args) != 1:
-            raise DistutilsOptionError(
-                "Must specify exactly one argument (the alias name) when "
-                "using --remove"
-            )
-
-    def run(self):
-        aliases = self.distribution.get_option_dict('aliases')
-
-        if not self.args:
-            print("Command Aliases")
-            print("---------------")
-            for alias in aliases:
-                print("setup.py alias", format_alias(alias, aliases))
-            return
-
-        elif len(self.args) == 1:
-            alias, = self.args
-            if self.remove:
-                command = None
-            elif alias in aliases:
-                print("setup.py alias", format_alias(alias, aliases))
-                return
-            else:
-                print("No alias definition found for %r" % alias)
-                return
-        else:
-            alias = self.args[0]
-            command = ' '.join(map(shquote, self.args[1:]))
-
-        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
-
-
-def format_alias(name, aliases):
-    source, command = aliases[name]
-    if source == config_file('global'):
-        source = '--global-config '
-    elif source == config_file('user'):
-        source = '--user-config '
-    elif source == config_file('local'):
-        source = ''
-    else:
-        source = '--filename=%r' % source
-    return source + name + ' ' + command
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_egg.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_egg.py
deleted file mode 100755
index e6b1609..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_egg.py
+++ /dev/null
@@ -1,456 +0,0 @@
-"""setuptools.command.bdist_egg - -Build .egg distributions""" - -from distutils.dir_util import remove_tree, mkpath -from distutils import log -from types import CodeType -import sys -import os -import re -import textwrap -import marshal - -from pkg_resources import get_build_platform, Distribution, ensure_directory -from setuptools.extension import Library -from setuptools import Command - -from sysconfig import get_path, get_python_version - - -def _get_purelib(): - return get_path("purelib") - - -def strip_module(filename): - if '.' in filename: - filename = os.path.splitext(filename)[0] - if filename.endswith('module'): - filename = filename[:-6] - return filename - - -def sorted_walk(dir): - """Do os.walk in a reproducible way, - independent of indeterministic filesystem readdir order - """ - for base, dirs, files in os.walk(dir): - dirs.sort() - files.sort() - yield base, dirs, files - - -def write_stub(resource, pyfile): - _stub_template = textwrap.dedent(""" - def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, importlib.util - __file__ = pkg_resources.resource_filename(__name__, %r) - __loader__ = None; del __bootstrap__, __loader__ - spec = importlib.util.spec_from_file_location(__name__,__file__) - mod = importlib.util.module_from_spec(spec) - spec.loader.exec_module(mod) - __bootstrap__() - """).lstrip() - with open(pyfile, 'w') as f: - f.write(_stub_template % resource) - - -class bdist_egg(Command): - description = "create an \"egg\" distribution" - - user_options = [ - ('bdist-dir=', 'b', - "temporary directory for creating the distribution"), - ('plat-name=', 'p', "platform name to embed in generated filenames " - "(default: %s)" % get_build_platform()), - ('exclude-source-files', None, - "remove all .py files from the generated egg"), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ] - - boolean_options = [ - 'keep-temp', 'skip-build', 'exclude-source-files' - ] - - def initialize_options(self): - self.bdist_dir = None - self.plat_name = None - self.keep_temp = 0 - self.dist_dir = None - self.skip_build = 0 - self.egg_output = None - self.exclude_source_files = None - - def finalize_options(self): - ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") - self.egg_info = ei_cmd.egg_info - - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'egg') - - if self.plat_name is None: - self.plat_name = get_build_platform() - - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - - if self.egg_output is None: - - # Compute filename of the output egg - basename = Distribution( - None, None, ei_cmd.egg_name, ei_cmd.egg_version, - get_python_version(), - self.distribution.has_ext_modules() and self.plat_name - ).egg_name() - - self.egg_output = os.path.join(self.dist_dir, basename + '.egg') - - def do_install_data(self): - # Hack for packages that install data to install's --install-lib - self.get_finalized_command('install').install_lib = self.bdist_dir - - site_packages = os.path.normcase(os.path.realpath(_get_purelib())) - old, self.distribution.data_files = self.distribution.data_files, [] - - for item in old: - if isinstance(item, tuple) and len(item) == 2: - if os.path.isabs(item[0]): - realpath = 
os.path.realpath(item[0]) - normalized = os.path.normcase(realpath) - if normalized == site_packages or normalized.startswith( - site_packages + os.sep - ): - item = realpath[len(site_packages) + 1:], item[1] - # XXX else: raise ??? - self.distribution.data_files.append(item) - - try: - log.info("installing package data to %s", self.bdist_dir) - self.call_command('install_data', force=0, root=None) - finally: - self.distribution.data_files = old - - def get_outputs(self): - return [self.egg_output] - - def call_command(self, cmdname, **kw): - """Invoke reinitialized command `cmdname` with keyword args""" - for dirname in INSTALL_DIRECTORY_ATTRS: - kw.setdefault(dirname, self.bdist_dir) - kw.setdefault('skip_build', self.skip_build) - kw.setdefault('dry_run', self.dry_run) - cmd = self.reinitialize_command(cmdname, **kw) - self.run_command(cmdname) - return cmd - - def run(self): # noqa: C901 # is too complex (14) # FIXME - # Generate metadata first - self.run_command("egg_info") - # We run install_lib before install_data, because some data hacks - # pull their data path from the install_lib command. - log.info("installing library code to %s", self.bdist_dir) - instcmd = self.get_finalized_command('install') - old_root = instcmd.root - instcmd.root = None - if self.distribution.has_c_libraries() and not self.skip_build: - self.run_command('build_clib') - cmd = self.call_command('install_lib', warn_dir=0) - instcmd.root = old_root - - all_outputs, ext_outputs = self.get_ext_outputs() - self.stubs = [] - to_compile = [] - for (p, ext_name) in enumerate(ext_outputs): - filename, ext = os.path.splitext(ext_name) - pyfile = os.path.join(self.bdist_dir, strip_module(filename) + - '.py') - self.stubs.append(pyfile) - log.info("creating stub loader for %s", ext_name) - if not self.dry_run: - write_stub(os.path.basename(ext_name), pyfile) - to_compile.append(pyfile) - ext_outputs[p] = ext_name.replace(os.sep, '/') - - if to_compile: - cmd.byte_compile(to_compile) - if self.distribution.data_files: - self.do_install_data() - - # Make the EGG-INFO directory - archive_root = self.bdist_dir - egg_info = os.path.join(archive_root, 'EGG-INFO') - self.mkpath(egg_info) - if self.distribution.scripts: - script_dir = os.path.join(egg_info, 'scripts') - log.info("installing scripts to %s", script_dir) - self.call_command('install_scripts', install_dir=script_dir, - no_ep=1) - - self.copy_metadata_to(egg_info) - native_libs = os.path.join(egg_info, "native_libs.txt") - if all_outputs: - log.info("writing %s", native_libs) - if not self.dry_run: - ensure_directory(native_libs) - libs_file = open(native_libs, 'wt') - libs_file.write('\n'.join(all_outputs)) - libs_file.write('\n') - libs_file.close() - elif os.path.isfile(native_libs): - log.info("removing %s", native_libs) - if not self.dry_run: - os.unlink(native_libs) - - write_safety_flag( - os.path.join(archive_root, 'EGG-INFO'), self.zip_safe() - ) - - if os.path.exists(os.path.join(self.egg_info, 'depends.txt')): - log.warn( - "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" - "Use the install_requires/extras_require setup() args instead." 
- ) - - if self.exclude_source_files: - self.zap_pyfiles() - - # Make the archive - make_zipfile(self.egg_output, archive_root, verbose=self.verbose, - dry_run=self.dry_run, mode=self.gen_header()) - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) - - # Add to 'Distribution.dist_files' so that the "upload" command works - getattr(self.distribution, 'dist_files', []).append( - ('bdist_egg', get_python_version(), self.egg_output)) - - def zap_pyfiles(self): - log.info("Removing .py files from temporary directory") - for base, dirs, files in walk_egg(self.bdist_dir): - for name in files: - path = os.path.join(base, name) - - if name.endswith('.py'): - log.debug("Deleting %s", path) - os.unlink(path) - - if base.endswith('__pycache__'): - path_old = path - - pattern = r'(?P.+)\.(?P[^.]+)\.pyc' - m = re.match(pattern, name) - path_new = os.path.join( - base, os.pardir, m.group('name') + '.pyc') - log.info( - "Renaming file from [%s] to [%s]" - % (path_old, path_new)) - try: - os.remove(path_new) - except OSError: - pass - os.rename(path_old, path_new) - - def zip_safe(self): - safe = getattr(self.distribution, 'zip_safe', None) - if safe is not None: - return safe - log.warn("zip_safe flag not set; analyzing archive contents...") - return analyze_egg(self.bdist_dir, self.stubs) - - def gen_header(self): - return 'w' - - def copy_metadata_to(self, target_dir): - "Copy metadata (egg info) to the target_dir" - # normalize the path (so that a forward-slash in egg_info will - # match using startswith below) - norm_egg_info = os.path.normpath(self.egg_info) - prefix = os.path.join(norm_egg_info, '') - for path in self.ei_cmd.filelist.files: - if path.startswith(prefix): - target = os.path.join(target_dir, path[len(prefix):]) - ensure_directory(target) - self.copy_file(path, target) - - def get_ext_outputs(self): - """Get a list of relative paths to C extensions in the output distro""" - - all_outputs = [] - ext_outputs = [] - - paths = {self.bdist_dir: ''} - for base, dirs, files in sorted_walk(self.bdist_dir): - for filename in files: - if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: - all_outputs.append(paths[base] + filename) - for filename in dirs: - paths[os.path.join(base, filename)] = (paths[base] + - filename + '/') - - if self.distribution.has_ext_modules(): - build_cmd = self.get_finalized_command('build_ext') - for ext in build_cmd.extensions: - if isinstance(ext, Library): - continue - fullname = build_cmd.get_ext_fullname(ext.name) - filename = build_cmd.get_ext_filename(fullname) - if not os.path.basename(filename).startswith('dl-'): - if os.path.exists(os.path.join(self.bdist_dir, filename)): - ext_outputs.append(filename) - - return all_outputs, ext_outputs - - -NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) - - -def walk_egg(egg_dir): - """Walk an unpacked egg's contents, skipping the metadata directory""" - walker = sorted_walk(egg_dir) - base, dirs, files = next(walker) - if 'EGG-INFO' in dirs: - dirs.remove('EGG-INFO') - yield base, dirs, files - for bdf in walker: - yield bdf - - -def analyze_egg(egg_dir, stubs): - # check for existing flag in EGG-INFO - for flag, fn in safety_flags.items(): - if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)): - return flag - if not can_scan(): - return False - safe = True - for base, dirs, files in walk_egg(egg_dir): - for name in files: - if name.endswith('.py') or name.endswith('.pyw'): - continue - elif name.endswith('.pyc') or name.endswith('.pyo'): - # always scan, even if 
we already know we're not safe - safe = scan_module(egg_dir, base, name, stubs) and safe - return safe - - -def write_safety_flag(egg_dir, safe): - # Write or remove zip safety flag file(s) - for flag, fn in safety_flags.items(): - fn = os.path.join(egg_dir, fn) - if os.path.exists(fn): - if safe is None or bool(safe) != flag: - os.unlink(fn) - elif safe is not None and bool(safe) == flag: - f = open(fn, 'wt') - f.write('\n') - f.close() - - -safety_flags = { - True: 'zip-safe', - False: 'not-zip-safe', -} - - -def scan_module(egg_dir, base, name, stubs): - """Check whether module possibly uses unsafe-for-zipfile stuff""" - - filename = os.path.join(base, name) - if filename[:-1] in stubs: - return True # Extension module - pkg = base[len(egg_dir) + 1:].replace(os.sep, '.') - module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0] - if sys.version_info < (3, 7): - skip = 12 # skip magic & date & file size - else: - skip = 16 # skip magic & reserved? & date & file size - f = open(filename, 'rb') - f.read(skip) - code = marshal.load(f) - f.close() - safe = True - symbols = dict.fromkeys(iter_symbols(code)) - for bad in ['__file__', '__path__']: - if bad in symbols: - log.warn("%s: module references %s", module, bad) - safe = False - if 'inspect' in symbols: - for bad in [ - 'getsource', 'getabsfile', 'getsourcefile', 'getfile' - 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', - 'getinnerframes', 'getouterframes', 'stack', 'trace' - ]: - if bad in symbols: - log.warn("%s: module MAY be using inspect.%s", module, bad) - safe = False - return safe - - -def iter_symbols(code): - """Yield names and strings used by `code` and its nested code objects""" - for name in code.co_names: - yield name - for const in code.co_consts: - if isinstance(const, str): - yield const - elif isinstance(const, CodeType): - for name in iter_symbols(const): - yield name - - -def can_scan(): - if not sys.platform.startswith('java') and sys.platform != 'cli': - # CPython, PyPy, etc. - return True - log.warn("Unable to analyze compiled code on this platform.") - log.warn("Please ask the author to include a 'zip_safe'" - " setting (either True or False) in the package's setup.py") - - -# Attribute names of options for commands that might need to be convinced to -# install to the egg build directory - -INSTALL_DIRECTORY_ATTRS = [ - 'install_lib', 'install_dir', 'install_data', 'install_base' -] - - -def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True, - mode='w'): - """Create a zip file from all the files under 'base_dir'. The output - zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" - Python module (if available) or the InfoZIP "zip" utility (if installed - and found on the default search path). If neither tool is available, - raises DistutilsExecError. Returns the name of the output zip file. 
- """ - import zipfile - - mkpath(os.path.dirname(zip_filename), dry_run=dry_run) - log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) - - def visit(z, dirname, names): - for name in names: - path = os.path.normpath(os.path.join(dirname, name)) - if os.path.isfile(path): - p = path[len(base_dir) + 1:] - if not dry_run: - z.write(path, p) - log.debug("adding '%s'", p) - - compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED - if not dry_run: - z = zipfile.ZipFile(zip_filename, mode, compression=compression) - for dirname, dirs, files in sorted_walk(base_dir): - visit(z, dirname, files) - z.close() - else: - for dirname, dirs, files in sorted_walk(base_dir): - visit(None, dirname, files) - return zip_filename diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_rpm.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_rpm.py deleted file mode 100755 index 98bf5de..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/bdist_rpm.py +++ /dev/null @@ -1,40 +0,0 @@ -import distutils.command.bdist_rpm as orig -import warnings - -from setuptools import SetuptoolsDeprecationWarning - - -class bdist_rpm(orig.bdist_rpm): - """ - Override the default bdist_rpm behavior to do the following: - - 1. Run egg_info to ensure the name and version are properly calculated. - 2. Always run 'install' using --single-version-externally-managed to - disable eggs in RPM distributions. - """ - - def run(self): - warnings.warn( - "bdist_rpm is deprecated and will be removed in a future " - "version. Use bdist_wheel (wheel packages) instead.", - SetuptoolsDeprecationWarning, - ) - - # ensure distro name is up-to-date - self.run_command('egg_info') - - orig.bdist_rpm.run(self) - - def _make_spec_file(self): - spec = orig.bdist_rpm._make_spec_file(self) - spec = [ - line.replace( - "setup.py install ", - "setup.py install --single-version-externally-managed " - ).replace( - "%setup", - "%setup -n %{name}-%{unmangled_version}" - ) - for line in spec - ] - return spec diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_clib.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_clib.py deleted file mode 100755 index 67ce244..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_clib.py +++ /dev/null @@ -1,101 +0,0 @@ -import distutils.command.build_clib as orig -from distutils.errors import DistutilsSetupError -from distutils import log -from setuptools.dep_util import newer_pairwise_group - - -class build_clib(orig.build_clib): - """ - Override the default build_clib behaviour to do the following: - - 1. Implement a rudimentary timestamp-based dependency system - so 'compile()' doesn't run every time. - 2. Add more keys to the 'build_info' dictionary: - * obj_deps - specify dependencies for each object compiled. - this should be a dictionary mapping a key - with the source filename to a list of - dependencies. Use an empty string for global - dependencies. - * cflags - specify a list of additional flags to pass to - the compiler. 
- """ - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name) - sources = list(sources) - - log.info("building '%s' library", lib_name) - - # Make sure everything is the correct type. - # obj_deps should be a dictionary of keys as sources - # and a list/tuple of files that are its dependencies. - obj_deps = build_info.get('obj_deps', dict()) - if not isinstance(obj_deps, dict): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'obj_deps' must be a dictionary of " - "type 'source: list'" % lib_name) - dependencies = [] - - # Get the global dependencies that are specified by the '' key. - # These will go into every source's dependency list. - global_deps = obj_deps.get('', list()) - if not isinstance(global_deps, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'obj_deps' must be a dictionary of " - "type 'source: list'" % lib_name) - - # Build the list to be used by newer_pairwise_group - # each source will be auto-added to its dependencies. - for source in sources: - src_deps = [source] - src_deps.extend(global_deps) - extra_deps = obj_deps.get(source, list()) - if not isinstance(extra_deps, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'obj_deps' must be a dictionary of " - "type 'source: list'" % lib_name) - src_deps.extend(extra_deps) - dependencies.append(src_deps) - - expected_objects = self.compiler.object_filenames( - sources, - output_dir=self.build_temp, - ) - - if ( - newer_pairwise_group(dependencies, expected_objects) - != ([], []) - ): - # First, compile the source code to object files in the library - # directory. (This should probably change to putting object - # files in a temporary build directory.) - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - cflags = build_info.get('cflags') - self.compiler.compile( - sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - extra_postargs=cflags, - debug=self.debug - ) - - # Now "link" the object files together into a static library. - # (On Unix at least, this isn't really linking -- it just - # builds an archive. Whatever.) 
- self.compiler.create_static_lib( - expected_objects, - lib_name, - output_dir=self.build_clib, - debug=self.debug - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_ext.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_ext.py deleted file mode 100755 index c59eff8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_ext.py +++ /dev/null @@ -1,328 +0,0 @@ -import os -import sys -import itertools -from importlib.machinery import EXTENSION_SUFFIXES -from distutils.command.build_ext import build_ext as _du_build_ext -from distutils.file_util import copy_file -from distutils.ccompiler import new_compiler -from distutils.sysconfig import customize_compiler, get_config_var -from distutils.errors import DistutilsError -from distutils import log - -from setuptools.extension import Library - -try: - # Attempt to use Cython for building extensions, if available - from Cython.Distutils.build_ext import build_ext as _build_ext - # Additionally, assert that the compiler module will load - # also. Ref #1229. - __import__('Cython.Compiler.Main') -except ImportError: - _build_ext = _du_build_ext - -# make sure _config_vars is initialized -get_config_var("LDSHARED") -from distutils.sysconfig import _config_vars as _CONFIG_VARS # noqa - - -def _customize_compiler_for_shlib(compiler): - if sys.platform == "darwin": - # building .dylib requires additional compiler flags on OSX; here we - # temporarily substitute the pyconfig.h variables so that distutils' - # 'customize_compiler' uses them before we build the shared libraries. - tmp = _CONFIG_VARS.copy() - try: - # XXX Help! I don't have any idea whether these are right... - _CONFIG_VARS['LDSHARED'] = ( - "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup") - _CONFIG_VARS['CCSHARED'] = " -dynamiclib" - _CONFIG_VARS['SO'] = ".dylib" - customize_compiler(compiler) - finally: - _CONFIG_VARS.clear() - _CONFIG_VARS.update(tmp) - else: - customize_compiler(compiler) - - -have_rtld = False -use_stubs = False -libtype = 'shared' - -if sys.platform == "darwin": - use_stubs = True -elif os.name != 'nt': - try: - import dl - use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW') - except ImportError: - pass - - -def if_dl(s): - return s if have_rtld else '' - - -def get_abi3_suffix(): - """Return the file extension for an abi3-compliant Extension()""" - for suffix in EXTENSION_SUFFIXES: - if '.abi3' in suffix: # Unix - return suffix - elif suffix == '.pyd': # Windows - return suffix - - -class build_ext(_build_ext): - def run(self): - """Build extensions in build directory, then copy if --inplace""" - old_inplace, self.inplace = self.inplace, 0 - _build_ext.run(self) - self.inplace = old_inplace - if old_inplace: - self.copy_extensions_to_source() - - def copy_extensions_to_source(self): - build_py = self.get_finalized_command('build_py') - for ext in self.extensions: - fullname = self.get_ext_fullname(ext.name) - filename = self.get_ext_filename(fullname) - modpath = fullname.split('.') - package = '.'.join(modpath[:-1]) - package_dir = build_py.get_package_dir(package) - dest_filename = os.path.join(package_dir, - os.path.basename(filename)) - src_filename = os.path.join(self.build_lib, filename) - - # Always copy, even if source is older than destination, to ensure - # that the right extensions for the current Python/platform are - # used. 
- copy_file( - src_filename, dest_filename, verbose=self.verbose, - dry_run=self.dry_run - ) - if ext._needs_stub: - self.write_stub(package_dir or os.curdir, ext, True) - - def get_ext_filename(self, fullname): - so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX') - if so_ext: - filename = os.path.join(*fullname.split('.')) + so_ext - else: - filename = _build_ext.get_ext_filename(self, fullname) - so_ext = get_config_var('EXT_SUFFIX') - - if fullname in self.ext_map: - ext = self.ext_map[fullname] - use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix() - if use_abi3: - filename = filename[:-len(so_ext)] - so_ext = get_abi3_suffix() - filename = filename + so_ext - if isinstance(ext, Library): - fn, ext = os.path.splitext(filename) - return self.shlib_compiler.library_filename(fn, libtype) - elif use_stubs and ext._links_to_dynamic: - d, fn = os.path.split(filename) - return os.path.join(d, 'dl-' + fn) - return filename - - def initialize_options(self): - _build_ext.initialize_options(self) - self.shlib_compiler = None - self.shlibs = [] - self.ext_map = {} - - def finalize_options(self): - _build_ext.finalize_options(self) - self.extensions = self.extensions or [] - self.check_extensions_list(self.extensions) - self.shlibs = [ext for ext in self.extensions - if isinstance(ext, Library)] - if self.shlibs: - self.setup_shlib_compiler() - for ext in self.extensions: - ext._full_name = self.get_ext_fullname(ext.name) - for ext in self.extensions: - fullname = ext._full_name - self.ext_map[fullname] = ext - - # distutils 3.1 will also ask for module names - # XXX what to do with conflicts? - self.ext_map[fullname.split('.')[-1]] = ext - - ltd = self.shlibs and self.links_to_dynamic(ext) or False - ns = ltd and use_stubs and not isinstance(ext, Library) - ext._links_to_dynamic = ltd - ext._needs_stub = ns - filename = ext._file_name = self.get_ext_filename(fullname) - libdir = os.path.dirname(os.path.join(self.build_lib, filename)) - if ltd and libdir not in ext.library_dirs: - ext.library_dirs.append(libdir) - if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: - ext.runtime_library_dirs.append(os.curdir) - - def setup_shlib_compiler(self): - compiler = self.shlib_compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) - _customize_compiler_for_shlib(compiler) - - if self.include_dirs is not None: - compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name, value) in self.define: - compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - compiler.undefine_macro(macro) - if self.libraries is not None: - compiler.set_libraries(self.libraries) - if self.library_dirs is not None: - compiler.set_library_dirs(self.library_dirs) - if self.rpath is not None: - compiler.set_runtime_library_dirs(self.rpath) - if self.link_objects is not None: - compiler.set_link_objects(self.link_objects) - - # hack so distutils' build_extension() builds a library instead - compiler.link_shared_object = link_shared_object.__get__(compiler) - - def get_export_symbols(self, ext): - if isinstance(ext, Library): - return ext.export_symbols - return _build_ext.get_export_symbols(self, ext) - - def build_extension(self, ext): - ext._convert_pyx_sources_to_lang() - _compiler = self.compiler - try: - if isinstance(ext, Library): - self.compiler = self.shlib_compiler - _build_ext.build_extension(self, ext) - if ext._needs_stub: - cmd = 
self.get_finalized_command('build_py').build_lib - self.write_stub(cmd, ext) - finally: - self.compiler = _compiler - - def links_to_dynamic(self, ext): - """Return true if 'ext' links to a dynamic lib in the same package""" - # XXX this should check to ensure the lib is actually being built - # XXX as dynamic, and not just using a locally-found version or a - # XXX static-compiled version - libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) - pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) - return any(pkg + libname in libnames for libname in ext.libraries) - - def get_outputs(self): - return _build_ext.get_outputs(self) + self.__get_stubs_outputs() - - def __get_stubs_outputs(self): - # assemble the base name for each extension that needs a stub - ns_ext_bases = ( - os.path.join(self.build_lib, *ext._full_name.split('.')) - for ext in self.extensions - if ext._needs_stub - ) - # pair each base with the extension - pairs = itertools.product(ns_ext_bases, self.__get_output_extensions()) - return list(base + fnext for base, fnext in pairs) - - def __get_output_extensions(self): - yield '.py' - yield '.pyc' - if self.get_finalized_command('build_py').optimize: - yield '.pyo' - - def write_stub(self, output_dir, ext, compile=False): - log.info("writing stub loader for %s to %s", ext._full_name, - output_dir) - stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) + - '.py') - if compile and os.path.exists(stub_file): - raise DistutilsError(stub_file + " already exists! Please delete.") - if not self.dry_run: - f = open(stub_file, 'w') - f.write( - '\n'.join([ - "def __bootstrap__():", - " global __bootstrap__, __file__, __loader__", - " import sys, os, pkg_resources, importlib.util" + - if_dl(", dl"), - " __file__ = pkg_resources.resource_filename" - "(__name__,%r)" - % os.path.basename(ext._file_name), - " del __bootstrap__", - " if '__loader__' in globals():", - " del __loader__", - if_dl(" old_flags = sys.getdlopenflags()"), - " old_dir = os.getcwd()", - " try:", - " os.chdir(os.path.dirname(__file__))", - if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), - " spec = importlib.util.spec_from_file_location(", - " __name__, __file__)", - " mod = importlib.util.module_from_spec(spec)", - " spec.loader.exec_module(mod)", - " finally:", - if_dl(" sys.setdlopenflags(old_flags)"), - " os.chdir(old_dir)", - "__bootstrap__()", - "" # terminal \n - ]) - ) - f.close() - if compile: - from distutils.util import byte_compile - - byte_compile([stub_file], optimize=0, - force=True, dry_run=self.dry_run) - optimize = self.get_finalized_command('install_lib').optimize - if optimize > 0: - byte_compile([stub_file], optimize=optimize, - force=True, dry_run=self.dry_run) - if os.path.exists(stub_file) and not self.dry_run: - os.unlink(stub_file) - - -if use_stubs or os.name == 'nt': - # Build shared libraries - # - def link_shared_object( - self, objects, output_libname, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, export_symbols=None, - debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, - target_lang=None): - self.link( - self.SHARED_LIBRARY, objects, output_libname, - output_dir, libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, extra_preargs, extra_postargs, - build_temp, target_lang - ) -else: - # Build static libraries everywhere else - libtype = 'static' - - def link_shared_object( - self, objects, output_libname, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, 
export_symbols=None, - debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, - target_lang=None): - # XXX we need to either disallow these attrs on Library instances, - # or warn/abort here if set, or something... - # libraries=None, library_dirs=None, runtime_library_dirs=None, - # export_symbols=None, extra_preargs=None, extra_postargs=None, - # build_temp=None - - assert output_dir is None # distutils build_ext doesn't pass this - output_dir, filename = os.path.split(output_libname) - basename, ext = os.path.splitext(filename) - if self.library_filename("x").startswith('lib'): - # strip 'lib' prefix; this is kludgy if some platform uses - # a different prefix - basename = basename[3:] - - self.create_static_lib( - objects, basename, output_dir, debug, target_lang - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_py.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_py.py deleted file mode 100755 index c3fdc09..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/build_py.py +++ /dev/null @@ -1,242 +0,0 @@ -from glob import glob -from distutils.util import convert_path -import distutils.command.build_py as orig -import os -import fnmatch -import textwrap -import io -import distutils.errors -import itertools -import stat -from setuptools.extern.more_itertools import unique_everseen - - -def make_writable(target): - os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE) - - -class build_py(orig.build_py): - """Enhanced 'build_py' command that includes data files with packages - - The data files are specified via a 'package_data' argument to 'setup()'. - See 'setuptools.dist.Distribution' for more details. - - Also, this version of the 'build_py' command allows you to specify both - 'py_modules' and 'packages' in the same setup operation. - """ - - def finalize_options(self): - orig.build_py.finalize_options(self) - self.package_data = self.distribution.package_data - self.exclude_package_data = self.distribution.exclude_package_data or {} - if 'data_files' in self.__dict__: - del self.__dict__['data_files'] - self.__updated_files = [] - - def run(self): - """Build modules, packages, and copy data files to build directory""" - if not self.py_modules and not self.packages: - return - - if self.py_modules: - self.build_modules() - - if self.packages: - self.build_packages() - self.build_package_data() - - # Only compile actual .py files, using our base class' idea of what our - # output files are. - self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0)) - - def __getattr__(self, attr): - "lazily compute data files" - if attr == 'data_files': - self.data_files = self._get_data_files() - return self.data_files - return orig.build_py.__getattr__(self, attr) - - def build_module(self, module, module_file, package): - outfile, copied = orig.build_py.build_module(self, module, module_file, package) - if copied: - self.__updated_files.append(outfile) - return outfile, copied - - def _get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - self.analyze_manifest() - return list(map(self._get_pkg_data_files, self.packages or ())) - - def get_data_files_without_manifest(self): - """ - Generate list of ``(package,src_dir,build_dir,filenames)`` tuples, - but without triggering any attempt to analyze or build the manifest. 
- """ - # Prevent eventual errors from unset `manifest_files` - # (that would otherwise be set by `analyze_manifest`) - self.__dict__.setdefault('manifest_files', {}) - return list(map(self._get_pkg_data_files, self.packages or ())) - - def _get_pkg_data_files(self, package): - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Strip directory from globbed filenames - filenames = [ - os.path.relpath(file, src_dir) - for file in self.find_data_files(package, src_dir) - ] - return package, src_dir, build_dir, filenames - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - patterns = self._get_platform_patterns( - self.package_data, - package, - src_dir, - ) - globs_expanded = map(glob, patterns) - # flatten the expanded globs into an iterable of matches - globs_matches = itertools.chain.from_iterable(globs_expanded) - glob_files = filter(os.path.isfile, globs_matches) - files = itertools.chain( - self.manifest_files.get(package, []), - glob_files, - ) - return self.exclude_data_files(package, src_dir, files) - - def build_package_data(self): - """Copy data files into build directory""" - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - srcfile = os.path.join(src_dir, filename) - outf, copied = self.copy_file(srcfile, target) - make_writable(target) - srcfile = os.path.abspath(srcfile) - - def analyze_manifest(self): - self.manifest_files = mf = {} - if not self.distribution.include_package_data: - return - src_dirs = {} - for package in self.packages or (): - # Locate package source directory - src_dirs[assert_relative(self.get_package_dir(package))] = package - - self.run_command('egg_info') - ei_cmd = self.get_finalized_command('egg_info') - for path in ei_cmd.filelist.files: - d, f = os.path.split(assert_relative(path)) - prev = None - oldf = f - while d and d != prev and d not in src_dirs: - prev = d - d, df = os.path.split(d) - f = os.path.join(df, f) - if d in src_dirs: - if path.endswith('.py') and f == oldf: - continue # it's a module, not data - mf.setdefault(src_dirs[d], []).append(path) - - def get_data_files(self): - pass # Lazily compute data files in _get_data_files() function. - - def check_package(self, package, package_dir): - """Check namespace packages' __init__ for declare_namespace""" - try: - return self.packages_checked[package] - except KeyError: - pass - - init_py = orig.build_py.check_package(self, package, package_dir) - self.packages_checked[package] = init_py - - if not init_py or not self.distribution.namespace_packages: - return init_py - - for pkg in self.distribution.namespace_packages: - if pkg == package or pkg.startswith(package + '.'): - break - else: - return init_py - - with io.open(init_py, 'rb') as f: - contents = f.read() - if b'declare_namespace' not in contents: - raise distutils.errors.DistutilsError( - "Namespace package problem: %s is a namespace package, but " - "its\n__init__.py does not call declare_namespace()! 
Please " - 'fix it.\n(See the setuptools manual under ' - '"Namespace Packages" for details.)\n"' % (package,) - ) - return init_py - - def initialize_options(self): - self.packages_checked = {} - orig.build_py.initialize_options(self) - - def get_package_dir(self, package): - res = orig.build_py.get_package_dir(self, package) - if self.distribution.src_root is not None: - return os.path.join(self.distribution.src_root, res) - return res - - def exclude_data_files(self, package, src_dir, files): - """Filter filenames for package's data files in 'src_dir'""" - files = list(files) - patterns = self._get_platform_patterns( - self.exclude_package_data, - package, - src_dir, - ) - match_groups = (fnmatch.filter(files, pattern) for pattern in patterns) - # flatten the groups of matches into an iterable of matches - matches = itertools.chain.from_iterable(match_groups) - bad = set(matches) - keepers = (fn for fn in files if fn not in bad) - # ditch dupes - return list(unique_everseen(keepers)) - - @staticmethod - def _get_platform_patterns(spec, package, src_dir): - """ - yield platform-specific path patterns (suitable for glob - or fn_match) from a glob-based spec (such as - self.package_data or self.exclude_package_data) - matching package in src_dir. - """ - raw_patterns = itertools.chain( - spec.get('', []), - spec.get(package, []), - ) - return ( - # Each pattern has to be converted to a platform-specific path - os.path.join(src_dir, convert_path(pattern)) - for pattern in raw_patterns - ) - - -def assert_relative(path): - if not os.path.isabs(path): - return path - from distutils.errors import DistutilsSetupError - - msg = ( - textwrap.dedent( - """ - Error: setup script specifies an absolute path: - - %s - - setup() arguments must *always* be /-separated paths relative to the - setup.py directory, *never* absolute paths. - """ - ).lstrip() - % path - ) - raise DistutilsSetupError(msg) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/develop.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/develop.py deleted file mode 100755 index 24fb0a7..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/develop.py +++ /dev/null @@ -1,193 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsError, DistutilsOptionError -import os -import glob -import io - -import pkg_resources -from setuptools.command.easy_install import easy_install -from setuptools import namespaces -import setuptools - - -class develop(namespaces.DevelopInstaller, easy_install): - """Set up package for development""" - - description = "install package in 'development mode'" - - user_options = easy_install.user_options + [ - ("uninstall", "u", "Uninstall this source package"), - ("egg-path=", None, "Set the path to be used in the .egg-link file"), - ] - - boolean_options = easy_install.boolean_options + ['uninstall'] - - command_consumes_arguments = False # override base - - def run(self): - if self.uninstall: - self.multi_version = True - self.uninstall_link() - self.uninstall_namespaces() - else: - self.install_for_development() - self.warn_deprecated_options() - - def initialize_options(self): - self.uninstall = None - self.egg_path = None - easy_install.initialize_options(self) - self.setup_path = None - self.always_copy_from = '.' 
# always copy eggs installed in curdir - - def finalize_options(self): - ei = self.get_finalized_command("egg_info") - if ei.broken_egg_info: - template = "Please rename %r to %r before using 'develop'" - args = ei.egg_info, ei.broken_egg_info - raise DistutilsError(template % args) - self.args = [ei.egg_name] - - easy_install.finalize_options(self) - self.expand_basedirs() - self.expand_dirs() - # pick up setup-dir .egg files only: no .egg-info - self.package_index.scan(glob.glob('*.egg')) - - egg_link_fn = ei.egg_name + '.egg-link' - self.egg_link = os.path.join(self.install_dir, egg_link_fn) - self.egg_base = ei.egg_base - if self.egg_path is None: - self.egg_path = os.path.abspath(ei.egg_base) - - target = pkg_resources.normalize_path(self.egg_base) - egg_path = pkg_resources.normalize_path( - os.path.join(self.install_dir, self.egg_path) - ) - if egg_path != target: - raise DistutilsOptionError( - "--egg-path must be a relative path from the install" - " directory to " + target - ) - - # Make a distribution for the package's source - self.dist = pkg_resources.Distribution( - target, - pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)), - project_name=ei.egg_name, - ) - - self.setup_path = self._resolve_setup_path( - self.egg_base, - self.install_dir, - self.egg_path, - ) - - @staticmethod - def _resolve_setup_path(egg_base, install_dir, egg_path): - """ - Generate a path from egg_base back to '.' where the - setup script resides and ensure that path points to the - setup path from $install_dir/$egg_path. - """ - path_to_setup = egg_base.replace(os.sep, '/').rstrip('/') - if path_to_setup != os.curdir: - path_to_setup = '../' * (path_to_setup.count('/') + 1) - resolved = pkg_resources.normalize_path( - os.path.join(install_dir, egg_path, path_to_setup) - ) - if resolved != pkg_resources.normalize_path(os.curdir): - raise DistutilsOptionError( - "Can't get a consistent path to setup script from" - " installation directory", - resolved, - pkg_resources.normalize_path(os.curdir), - ) - return path_to_setup - - def install_for_development(self): - self.run_command('egg_info') - - # Build extensions in-place - self.reinitialize_command('build_ext', inplace=1) - self.run_command('build_ext') - - if setuptools.bootstrap_install_from: - self.easy_install(setuptools.bootstrap_install_from) - setuptools.bootstrap_install_from = None - - self.install_namespaces() - - # create an .egg-link in the installation dir, pointing to our egg - log.info("Creating %s (link to %s)", self.egg_link, self.egg_base) - if not self.dry_run: - with open(self.egg_link, "w") as f: - f.write(self.egg_path + "\n" + self.setup_path) - # postprocess the installed distro, fixing up .pth, installing scripts, - # and handling requirements - self.process_distribution(None, self.dist, not self.no_deps) - - def uninstall_link(self): - if os.path.exists(self.egg_link): - log.info("Removing %s (link to %s)", self.egg_link, self.egg_base) - egg_link_file = open(self.egg_link) - contents = [line.rstrip() for line in egg_link_file] - egg_link_file.close() - if contents not in ([self.egg_path], [self.egg_path, self.setup_path]): - log.warn("Link points to %s: uninstall aborted", contents) - return - if not self.dry_run: - os.unlink(self.egg_link) - if not self.dry_run: - self.update_pth(self.dist) # remove any .pth link to us - if self.distribution.scripts: - # XXX should also check for entry point scripts! 
- log.warn("Note: you must uninstall or replace scripts manually!") - - def install_egg_scripts(self, dist): - if dist is not self.dist: - # Installing a dependency, so fall back to normal behavior - return easy_install.install_egg_scripts(self, dist) - - # create wrapper scripts in the script dir, pointing to dist.scripts - - # new-style... - self.install_wrapper_scripts(dist) - - # ...and old-style - for script_name in self.distribution.scripts or []: - script_path = os.path.abspath(convert_path(script_name)) - script_name = os.path.basename(script_path) - with io.open(script_path) as strm: - script_text = strm.read() - self.install_script(dist, script_name, script_text, script_path) - - def install_wrapper_scripts(self, dist): - dist = VersionlessRequirement(dist) - return easy_install.install_wrapper_scripts(self, dist) - - -class VersionlessRequirement: - """ - Adapt a pkg_resources.Distribution to simply return the project - name as the 'requirement' so that scripts will work across - multiple versions. - - >>> from pkg_resources import Distribution - >>> dist = Distribution(project_name='foo', version='1.0') - >>> str(dist.as_requirement()) - 'foo==1.0' - >>> adapted_dist = VersionlessRequirement(dist) - >>> str(adapted_dist.as_requirement()) - 'foo' - """ - - def __init__(self, dist): - self.__dist = dist - - def __getattr__(self, name): - return getattr(self.__dist, name) - - def as_requirement(self): - return self.project_name diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/dist_info.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/dist_info.py deleted file mode 100755 index c45258f..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/dist_info.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Create a dist_info directory -As defined in the wheel specification -""" - -import os - -from distutils.core import Command -from distutils import log - - -class dist_info(Command): - - description = 'create a .dist-info directory' - - user_options = [ - ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)"), - ] - - def initialize_options(self): - self.egg_base = None - - def finalize_options(self): - pass - - def run(self): - egg_info = self.get_finalized_command('egg_info') - egg_info.egg_base = self.egg_base - egg_info.finalize_options() - egg_info.run() - dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info' - log.info("creating '{}'".format(os.path.abspath(dist_info_dir))) - - bdist_wheel = self.get_finalized_command('bdist_wheel') - bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/easy_install.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/easy_install.py deleted file mode 100755 index ef1a9b2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/easy_install.py +++ /dev/null @@ -1,2317 +0,0 @@ -""" -Easy Install ------------- - -A tool for doing automatic download/extract/build of distutils-based Python -packages. For detailed documentation, see the accompanying EasyInstall.txt -file, or visit the `EasyInstall home page`__. 
- -__ https://setuptools.pypa.io/en/latest/deprecated/easy_install.html - -""" - -from glob import glob -from distutils.util import get_platform -from distutils.util import convert_path, subst_vars -from distutils.errors import ( - DistutilsArgError, DistutilsOptionError, - DistutilsError, DistutilsPlatformError, -) -from distutils import log, dir_util -from distutils.command.build_scripts import first_line_re -from distutils.spawn import find_executable -from distutils.command import install -import sys -import os -import zipimport -import shutil -import tempfile -import zipfile -import re -import stat -import random -import textwrap -import warnings -import site -import struct -import contextlib -import subprocess -import shlex -import io -import configparser -import sysconfig - - -from sysconfig import get_path - -from setuptools import SetuptoolsDeprecationWarning - -from setuptools import Command -from setuptools.sandbox import run_setup -from setuptools.command import setopt -from setuptools.archive_util import unpack_archive -from setuptools.package_index import ( - PackageIndex, parse_requirement_arg, URL_SCHEME, -) -from setuptools.command import bdist_egg, egg_info -from setuptools.wheel import Wheel -from pkg_resources import ( - yield_lines, normalize_path, resource_string, ensure_directory, - get_distribution, find_distributions, Environment, Requirement, - Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, - VersionConflict, DEVELOP_DIST, -) -import pkg_resources - -# Turn on PEP440Warnings -warnings.filterwarnings("default", category=pkg_resources.PEP440Warning) - -__all__ = [ - 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', - 'get_exe_prefixes', -] - - -def is_64bit(): - return struct.calcsize("P") == 8 - - -def samefile(p1, p2): - """ - Determine if two paths reference the same file. - - Augments os.path.samefile to work on Windows and - suppresses errors if the path doesn't exist. 
- """ - both_exist = os.path.exists(p1) and os.path.exists(p2) - use_samefile = hasattr(os.path, 'samefile') and both_exist - if use_samefile: - return os.path.samefile(p1, p2) - norm_p1 = os.path.normpath(os.path.normcase(p1)) - norm_p2 = os.path.normpath(os.path.normcase(p2)) - return norm_p1 == norm_p2 - - -def _to_bytes(s): - return s.encode('utf8') - - -def isascii(s): - try: - s.encode('ascii') - return True - except UnicodeError: - return False - - -def _one_liner(text): - return textwrap.dedent(text).strip().replace('\n', '; ') - - -class easy_install(Command): - """Manage a download/build/install process""" - description = "Find/get/install Python packages" - command_consumes_arguments = True - - user_options = [ - ('prefix=', None, "installation prefix"), - ("zip-ok", "z", "install package as a zipfile"), - ("multi-version", "m", "make apps have to require() a version"), - ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"), - ("install-dir=", "d", "install package to DIR"), - ("script-dir=", "s", "install scripts to DIR"), - ("exclude-scripts", "x", "Don't install scripts"), - ("always-copy", "a", "Copy all needed packages to install dir"), - ("index-url=", "i", "base URL of Python Package Index"), - ("find-links=", "f", "additional URL(s) to search for packages"), - ("build-directory=", "b", - "download/extract/build in DIR; keep the results"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('record=', None, - "filename in which to record list of installed files"), - ('always-unzip', 'Z', "don't install as a zipfile, no matter what"), - ('site-dirs=', 'S', "list of directories where .pth files work"), - ('editable', 'e', "Install specified packages in editable form"), - ('no-deps', 'N', "don't install dependencies"), - ('allow-hosts=', 'H', "pattern(s) that hostnames must match"), - ('local-snapshots-ok', 'l', - "allow building eggs from local checkouts"), - ('version', None, "print version information and exit"), - ('no-find-links', None, - "Don't load find-links defined in packages being installed"), - ('user', None, "install in user site-package '%s'" % site.USER_SITE) - ] - boolean_options = [ - 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy', - 'editable', - 'no-deps', 'local-snapshots-ok', 'version', - 'user' - ] - - negative_opt = {'always-unzip': 'zip-ok'} - create_index = PackageIndex - - def initialize_options(self): - warnings.warn( - "easy_install command is deprecated. " - "Use build and pip and other standards-based tools.", - EasyInstallDeprecationWarning, - ) - - # the --user option seems to be an opt-in one, - # so the default should be False. 
- self.user = 0 - self.zip_ok = self.local_snapshots_ok = None - self.install_dir = self.script_dir = self.exclude_scripts = None - self.index_url = None - self.find_links = None - self.build_directory = None - self.args = None - self.optimize = self.record = None - self.upgrade = self.always_copy = self.multi_version = None - self.editable = self.no_deps = self.allow_hosts = None - self.root = self.prefix = self.no_report = None - self.version = None - self.install_purelib = None # for pure module distributions - self.install_platlib = None # non-pure (dists w/ extensions) - self.install_headers = None # for C/C++ headers - self.install_lib = None # set to either purelib or platlib - self.install_scripts = None - self.install_data = None - self.install_base = None - self.install_platbase = None - if site.ENABLE_USER_SITE: - self.install_userbase = site.USER_BASE - self.install_usersite = site.USER_SITE - else: - self.install_userbase = None - self.install_usersite = None - self.no_find_links = None - - # Options not specifiable via command line - self.package_index = None - self.pth_file = self.always_copy_from = None - self.site_dirs = None - self.installed_projects = {} - # Always read easy_install options, even if we are subclassed, or have - # an independent instance created. This ensures that defaults will - # always come from the standard configuration file(s)' "easy_install" - # section, even if this is a "develop" or "install" command, or some - # other embedding. - self._dry_run = None - self.verbose = self.distribution.verbose - self.distribution._set_command_options( - self, self.distribution.get_option_dict('easy_install') - ) - - def delete_blockers(self, blockers): - extant_blockers = ( - filename for filename in blockers - if os.path.exists(filename) or os.path.islink(filename) - ) - list(map(self._delete_path, extant_blockers)) - - def _delete_path(self, path): - log.info("Deleting %s", path) - if self.dry_run: - return - - is_tree = os.path.isdir(path) and not os.path.islink(path) - remover = rmtree if is_tree else os.unlink - remover(path) - - @staticmethod - def _render_version(): - """ - Render the Setuptools version and installation details, then exit. 
- """ - ver = '{}.{}'.format(*sys.version_info) - dist = get_distribution('setuptools') - tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})' - print(tmpl.format(**locals())) - raise SystemExit() - - def finalize_options(self): # noqa: C901 # is too complex (25) # FIXME - self.version and self._render_version() - - py_version = sys.version.split()[0] - - self.config_vars = dict(sysconfig.get_config_vars()) - - self.config_vars.update({ - 'dist_name': self.distribution.get_name(), - 'dist_version': self.distribution.get_version(), - 'dist_fullname': self.distribution.get_fullname(), - 'py_version': py_version, - 'py_version_short': f'{sys.version_info.major}.{sys.version_info.minor}', - 'py_version_nodot': f'{sys.version_info.major}{sys.version_info.minor}', - 'sys_prefix': self.config_vars['prefix'], - 'sys_exec_prefix': self.config_vars['exec_prefix'], - # Only python 3.2+ has abiflags - 'abiflags': getattr(sys, 'abiflags', ''), - 'platlibdir': getattr(sys, 'platlibdir', 'lib'), - }) - with contextlib.suppress(AttributeError): - # only for distutils outside stdlib - self.config_vars.update({ - 'implementation_lower': install._get_implementation().lower(), - 'implementation': install._get_implementation(), - }) - - # pypa/distutils#113 Python 3.9 compat - self.config_vars.setdefault( - 'py_version_nodot_plat', - getattr(sys, 'windir', '').replace('.', ''), - ) - - if site.ENABLE_USER_SITE: - self.config_vars['userbase'] = self.install_userbase - self.config_vars['usersite'] = self.install_usersite - - elif self.user: - log.warn("WARNING: The user site-packages directory is disabled.") - - self._fix_install_dir_for_user_site() - - self.expand_basedirs() - self.expand_dirs() - - self._expand( - 'install_dir', 'script_dir', 'build_directory', - 'site_dirs', - ) - # If a non-default installation directory was specified, default the - # script directory to match it. - if self.script_dir is None: - self.script_dir = self.install_dir - - if self.no_find_links is None: - self.no_find_links = False - - # Let install_dir get set by install_lib command, which in turn - # gets its info from the install command, and takes into account - # --prefix and --home and all that other crud. - self.set_undefined_options( - 'install_lib', ('install_dir', 'install_dir') - ) - # Likewise, set default script_dir from 'install_scripts.install_dir' - self.set_undefined_options( - 'install_scripts', ('install_dir', 'script_dir') - ) - - if self.user and self.install_purelib: - self.install_dir = self.install_purelib - self.script_dir = self.install_scripts - # default --record from the install command - self.set_undefined_options('install', ('record', 'record')) - # Should this be moved to the if statement below? 
It's not used - # elsewhere - normpath = map(normalize_path, sys.path) - self.all_site_dirs = get_site_dirs() - if self.site_dirs is not None: - site_dirs = [ - os.path.expanduser(s.strip()) for s in - self.site_dirs.split(',') - ] - for d in site_dirs: - if not os.path.isdir(d): - log.warn("%s (in --site-dirs) does not exist", d) - elif normalize_path(d) not in normpath: - raise DistutilsOptionError( - d + " (in --site-dirs) is not on sys.path" - ) - else: - self.all_site_dirs.append(normalize_path(d)) - if not self.editable: - self.check_site_dir() - self.index_url = self.index_url or "https://pypi.org/simple/" - self.shadow_path = self.all_site_dirs[:] - for path_item in self.install_dir, normalize_path(self.script_dir): - if path_item not in self.shadow_path: - self.shadow_path.insert(0, path_item) - - if self.allow_hosts is not None: - hosts = [s.strip() for s in self.allow_hosts.split(',')] - else: - hosts = ['*'] - if self.package_index is None: - self.package_index = self.create_index( - self.index_url, search_path=self.shadow_path, hosts=hosts, - ) - self.local_index = Environment(self.shadow_path + sys.path) - - if self.find_links is not None: - if isinstance(self.find_links, str): - self.find_links = self.find_links.split() - else: - self.find_links = [] - if self.local_snapshots_ok: - self.package_index.scan_egg_links(self.shadow_path + sys.path) - if not self.no_find_links: - self.package_index.add_find_links(self.find_links) - self.set_undefined_options('install_lib', ('optimize', 'optimize')) - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - if not (0 <= self.optimize <= 2): - raise ValueError - except ValueError as e: - raise DistutilsOptionError( - "--optimize must be 0, 1, or 2" - ) from e - - if self.editable and not self.build_directory: - raise DistutilsArgError( - "Must specify a build directory (-b) when using --editable" - ) - if not self.args: - raise DistutilsArgError( - "No urls, filenames, or requirements specified (see --help)") - - self.outputs = [] - - def _fix_install_dir_for_user_site(self): - """ - Fix the install_dir if "--user" was used. 
- """ - if not self.user or not site.ENABLE_USER_SITE: - return - - self.create_home_path() - if self.install_userbase is None: - msg = "User base directory is not specified" - raise DistutilsPlatformError(msg) - self.install_base = self.install_platbase = self.install_userbase - scheme_name = f'{os.name}_user' - self.select_scheme(scheme_name) - - def _expand_attrs(self, attrs): - for attr in attrs: - val = getattr(self, attr) - if val is not None: - if os.name == 'posix' or os.name == 'nt': - val = os.path.expanduser(val) - val = subst_vars(val, self.config_vars) - setattr(self, attr, val) - - def expand_basedirs(self): - """Calls `os.path.expanduser` on install_base, install_platbase and - root.""" - self._expand_attrs(['install_base', 'install_platbase', 'root']) - - def expand_dirs(self): - """Calls `os.path.expanduser` on install dirs.""" - dirs = [ - 'install_purelib', - 'install_platlib', - 'install_lib', - 'install_headers', - 'install_scripts', - 'install_data', - ] - self._expand_attrs(dirs) - - def run(self, show_deprecation=True): - if show_deprecation: - self.announce( - "WARNING: The easy_install command is deprecated " - "and will be removed in a future version.", - log.WARN, - ) - if self.verbose != self.distribution.verbose: - log.set_verbosity(self.verbose) - try: - for spec in self.args: - self.easy_install(spec, not self.no_deps) - if self.record: - outputs = self.outputs - if self.root: # strip any package prefix - root_len = len(self.root) - for counter in range(len(outputs)): - outputs[counter] = outputs[counter][root_len:] - from distutils import file_util - - self.execute( - file_util.write_file, (self.record, outputs), - "writing list of installed files to '%s'" % - self.record - ) - self.warn_deprecated_options() - finally: - log.set_verbosity(self.distribution.verbose) - - def pseudo_tempname(self): - """Return a pseudo-tempname base in the install directory. - This code is intentionally naive; if a malicious party can write to - the target directory you're already in deep doodoo. - """ - try: - pid = os.getpid() - except Exception: - pid = random.randint(0, sys.maxsize) - return os.path.join(self.install_dir, "test-easy-install-%s" % pid) - - def warn_deprecated_options(self): - pass - - def check_site_dir(self): # noqa: C901 # is too complex (12) # FIXME - """Verify that self.install_dir is .pth-capable dir, if needed""" - - instdir = normalize_path(self.install_dir) - pth_file = os.path.join(instdir, 'easy-install.pth') - - if not os.path.exists(instdir): - try: - os.makedirs(instdir) - except (OSError, IOError): - self.cant_write_to_target() - - # Is it a configured, PYTHONPATH, implicit, or explicit site dir? - is_site_dir = instdir in self.all_site_dirs - - if not is_site_dir and not self.multi_version: - # No? 
Then directly test whether it does .pth file processing - is_site_dir = self.check_pth_processing() - else: - # make sure we can write to target dir - testfile = self.pseudo_tempname() + '.write-test' - test_exists = os.path.exists(testfile) - try: - if test_exists: - os.unlink(testfile) - open(testfile, 'w').close() - os.unlink(testfile) - except (OSError, IOError): - self.cant_write_to_target() - - if not is_site_dir and not self.multi_version: - # Can't install non-multi to non-site dir with easy_install - pythonpath = os.environ.get('PYTHONPATH', '') - log.warn(self.__no_default_msg, self.install_dir, pythonpath) - - if is_site_dir: - if self.pth_file is None: - self.pth_file = PthDistributions(pth_file, self.all_site_dirs) - else: - self.pth_file = None - - if self.multi_version and not os.path.exists(pth_file): - self.pth_file = None # don't create a .pth file - self.install_dir = instdir - - __cant_write_msg = textwrap.dedent(""" - can't create or remove files in install directory - - The following error occurred while trying to add or remove files in the - installation directory: - - %s - - The installation directory you specified (via --install-dir, --prefix, or - the distutils default setting) was: - - %s - """).lstrip() # noqa - - __not_exists_id = textwrap.dedent(""" - This directory does not currently exist. Please create it and try again, or - choose a different installation directory (using the -d or --install-dir - option). - """).lstrip() # noqa - - __access_msg = textwrap.dedent(""" - Perhaps your account does not have write access to this directory? If the - installation directory is a system-owned directory, you may need to sign in - as the administrator or "root" account. If you do not have administrative - access to this machine, you may wish to choose a different installation - directory, preferably one that is listed in your PYTHONPATH environment - variable. - - For information on other options, you may wish to consult the - documentation at: - - https://setuptools.pypa.io/en/latest/deprecated/easy_install.html - - Please make the appropriate changes for your system and try again. - """).lstrip() # noqa - - def cant_write_to_target(self): - msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,) - - if not os.path.exists(self.install_dir): - msg += '\n' + self.__not_exists_id - else: - msg += '\n' + self.__access_msg - raise DistutilsError(msg) - - def check_pth_processing(self): - """Empirically verify whether .pth files are supported in inst. 
dir""" - instdir = self.install_dir - log.info("Checking .pth file support in %s", instdir) - pth_file = self.pseudo_tempname() + ".pth" - ok_file = pth_file + '.ok' - ok_exists = os.path.exists(ok_file) - tmpl = _one_liner(""" - import os - f = open({ok_file!r}, 'w') - f.write('OK') - f.close() - """) + '\n' - try: - if ok_exists: - os.unlink(ok_file) - dirname = os.path.dirname(ok_file) - os.makedirs(dirname, exist_ok=True) - f = open(pth_file, 'w') - except (OSError, IOError): - self.cant_write_to_target() - else: - try: - f.write(tmpl.format(**locals())) - f.close() - f = None - executable = sys.executable - if os.name == 'nt': - dirname, basename = os.path.split(executable) - alt = os.path.join(dirname, 'pythonw.exe') - use_alt = ( - basename.lower() == 'python.exe' and - os.path.exists(alt) - ) - if use_alt: - # use pythonw.exe to avoid opening a console window - executable = alt - - from distutils.spawn import spawn - - spawn([executable, '-E', '-c', 'pass'], 0) - - if os.path.exists(ok_file): - log.info( - "TEST PASSED: %s appears to support .pth files", - instdir - ) - return True - finally: - if f: - f.close() - if os.path.exists(ok_file): - os.unlink(ok_file) - if os.path.exists(pth_file): - os.unlink(pth_file) - if not self.multi_version: - log.warn("TEST FAILED: %s does NOT support .pth files", instdir) - return False - - def install_egg_scripts(self, dist): - """Write all the scripts for `dist`, unless scripts are excluded""" - if not self.exclude_scripts and dist.metadata_isdir('scripts'): - for script_name in dist.metadata_listdir('scripts'): - if dist.metadata_isdir('scripts/' + script_name): - # The "script" is a directory, likely a Python 3 - # __pycache__ directory, so skip it. - continue - self.install_script( - dist, script_name, - dist.get_metadata('scripts/' + script_name) - ) - self.install_wrapper_scripts(dist) - - def add_output(self, path): - if os.path.isdir(path): - for base, dirs, files in os.walk(path): - for filename in files: - self.outputs.append(os.path.join(base, filename)) - else: - self.outputs.append(path) - - def not_editable(self, spec): - if self.editable: - raise DistutilsArgError( - "Invalid argument %r: you can't use filenames or URLs " - "with --editable (except via the --find-links option)." 
- % (spec,) - ) - - def check_editable(self, spec): - if not self.editable: - return - - if os.path.exists(os.path.join(self.build_directory, spec.key)): - raise DistutilsArgError( - "%r already exists in %s; can't do a checkout there" % - (spec.key, self.build_directory) - ) - - @contextlib.contextmanager - def _tmpdir(self): - tmpdir = tempfile.mkdtemp(prefix=u"easy_install-") - try: - # cast to str as workaround for #709 and #710 and #712 - yield str(tmpdir) - finally: - os.path.exists(tmpdir) and rmtree(tmpdir) - - def easy_install(self, spec, deps=False): - with self._tmpdir() as tmpdir: - if not isinstance(spec, Requirement): - if URL_SCHEME(spec): - # It's a url, download it to tmpdir and process - self.not_editable(spec) - dl = self.package_index.download(spec, tmpdir) - return self.install_item(None, dl, tmpdir, deps, True) - - elif os.path.exists(spec): - # Existing file or directory, just process it directly - self.not_editable(spec) - return self.install_item(None, spec, tmpdir, deps, True) - else: - spec = parse_requirement_arg(spec) - - self.check_editable(spec) - dist = self.package_index.fetch_distribution( - spec, tmpdir, self.upgrade, self.editable, - not self.always_copy, self.local_index - ) - if dist is None: - msg = "Could not find suitable distribution for %r" % spec - if self.always_copy: - msg += " (--always-copy skips system and development eggs)" - raise DistutilsError(msg) - elif dist.precedence == DEVELOP_DIST: - # .egg-info dists don't need installing, just process deps - self.process_distribution(spec, dist, deps, "Using") - return dist - else: - return self.install_item(spec, dist.location, tmpdir, deps) - - def install_item(self, spec, download, tmpdir, deps, install_needed=False): - - # Installation is also needed if file in tmpdir or is not an egg - install_needed = install_needed or self.always_copy - install_needed = install_needed or os.path.dirname(download) == tmpdir - install_needed = install_needed or not download.endswith('.egg') - install_needed = install_needed or ( - self.always_copy_from is not None and - os.path.dirname(normalize_path(download)) == - normalize_path(self.always_copy_from) - ) - - if spec and not install_needed: - # at this point, we know it's a local .egg, we just don't know if - # it's already installed. 
- for dist in self.local_index[spec.project_name]: - if dist.location == download: - break - else: - install_needed = True # it's not in the local index - - log.info("Processing %s", os.path.basename(download)) - - if install_needed: - dists = self.install_eggs(spec, download, tmpdir) - for dist in dists: - self.process_distribution(spec, dist, deps) - else: - dists = [self.egg_distribution(download)] - self.process_distribution(spec, dists[0], deps, "Using") - - if spec is not None: - for dist in dists: - if dist in spec: - return dist - - def select_scheme(self, name): - try: - install._select_scheme(self, name) - except AttributeError: - # stdlib distutils - install.install.select_scheme(self, name.replace('posix', 'unix')) - - # FIXME: 'easy_install.process_distribution' is too complex (12) - def process_distribution( # noqa: C901 - self, requirement, dist, deps=True, *info, - ): - self.update_pth(dist) - self.package_index.add(dist) - if dist in self.local_index[dist.key]: - self.local_index.remove(dist) - self.local_index.add(dist) - self.install_egg_scripts(dist) - self.installed_projects[dist.key] = dist - log.info(self.installation_report(requirement, dist, *info)) - if (dist.has_metadata('dependency_links.txt') and - not self.no_find_links): - self.package_index.add_find_links( - dist.get_metadata_lines('dependency_links.txt') - ) - if not deps and not self.always_copy: - return - elif requirement is not None and dist.key != requirement.key: - log.warn("Skipping dependencies for %s", dist) - return # XXX this is not the distribution we were looking for - elif requirement is None or dist not in requirement: - # if we wound up with a different version, resolve what we've got - distreq = dist.as_requirement() - requirement = Requirement(str(distreq)) - log.info("Processing dependencies for %s", requirement) - try: - distros = WorkingSet([]).resolve( - [requirement], self.local_index, self.easy_install - ) - except DistributionNotFound as e: - raise DistutilsError(str(e)) from e - except VersionConflict as e: - raise DistutilsError(e.report()) from e - if self.always_copy or self.always_copy_from: - # Force all the relevant distros to be copied or activated - for dist in distros: - if dist.key not in self.installed_projects: - self.easy_install(dist.as_requirement()) - log.info("Finished processing dependencies for %s", requirement) - - def should_unzip(self, dist): - if self.zip_ok is not None: - return not self.zip_ok - if dist.has_metadata('not-zip-safe'): - return True - if not dist.has_metadata('zip-safe'): - return True - return False - - def maybe_move(self, spec, dist_filename, setup_base): - dst = os.path.join(self.build_directory, spec.key) - if os.path.exists(dst): - msg = ( - "%r already exists in %s; build directory %s will not be kept" - ) - log.warn(msg, spec.key, self.build_directory, setup_base) - return setup_base - if os.path.isdir(dist_filename): - setup_base = dist_filename - else: - if os.path.dirname(dist_filename) == setup_base: - os.unlink(dist_filename) # get it out of the tmp dir - contents = os.listdir(setup_base) - if len(contents) == 1: - dist_filename = os.path.join(setup_base, contents[0]) - if os.path.isdir(dist_filename): - # if the only thing there is a directory, move it instead - setup_base = dist_filename - ensure_directory(dst) - shutil.move(setup_base, dst) - return dst - - def install_wrapper_scripts(self, dist): - if self.exclude_scripts: - return - for args in ScriptWriter.best().get_args(dist): - self.write_script(*args) - - def 
install_script(self, dist, script_name, script_text, dev_path=None): - """Generate a legacy script wrapper and install it""" - spec = str(dist.as_requirement()) - is_script = is_python_script(script_text, script_name) - - if is_script: - body = self._load_template(dev_path) % locals() - script_text = ScriptWriter.get_header(script_text) + body - self.write_script(script_name, _to_bytes(script_text), 'b') - - @staticmethod - def _load_template(dev_path): - """ - There are a couple of template scripts in the package. This - function loads one of them and prepares it for use. - """ - # See https://github.com/pypa/setuptools/issues/134 for info - # on script file naming and downstream issues with SVR4 - name = 'script.tmpl' - if dev_path: - name = name.replace('.tmpl', ' (dev).tmpl') - - raw_bytes = resource_string('setuptools', name) - return raw_bytes.decode('utf-8') - - def write_script(self, script_name, contents, mode="t", blockers=()): - """Write an executable file to the scripts directory""" - self.delete_blockers( # clean up old .py/.pyw w/o a script - [os.path.join(self.script_dir, x) for x in blockers] - ) - log.info("Installing %s script to %s", script_name, self.script_dir) - target = os.path.join(self.script_dir, script_name) - self.add_output(target) - - if self.dry_run: - return - - mask = current_umask() - ensure_directory(target) - if os.path.exists(target): - os.unlink(target) - with open(target, "w" + mode) as f: - f.write(contents) - chmod(target, 0o777 - mask) - - def install_eggs(self, spec, dist_filename, tmpdir): - # .egg dirs or files are already built, so just return them - installer_map = { - '.egg': self.install_egg, - '.exe': self.install_exe, - '.whl': self.install_wheel, - } - try: - install_dist = installer_map[ - dist_filename.lower()[-4:] - ] - except KeyError: - pass - else: - return [install_dist(dist_filename, tmpdir)] - - # Anything else, try to extract and build - setup_base = tmpdir - if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'): - unpack_archive(dist_filename, tmpdir, self.unpack_progress) - elif os.path.isdir(dist_filename): - setup_base = os.path.abspath(dist_filename) - - if (setup_base.startswith(tmpdir) # something we downloaded - and self.build_directory and spec is not None): - setup_base = self.maybe_move(spec, dist_filename, setup_base) - - # Find the setup.py file - setup_script = os.path.join(setup_base, 'setup.py') - - if not os.path.exists(setup_script): - setups = glob(os.path.join(setup_base, '*', 'setup.py')) - if not setups: - raise DistutilsError( - "Couldn't find a setup script in %s" % - os.path.abspath(dist_filename) - ) - if len(setups) > 1: - raise DistutilsError( - "Multiple setup scripts in %s" % - os.path.abspath(dist_filename) - ) - setup_script = setups[0] - - # Now run it, and return the result - if self.editable: - log.info(self.report_editable(spec, setup_script)) - return [] - else: - return self.build_and_install(setup_script, setup_base) - - def egg_distribution(self, egg_path): - if os.path.isdir(egg_path): - metadata = PathMetadata(egg_path, os.path.join(egg_path, - 'EGG-INFO')) - else: - metadata = EggMetadata(zipimport.zipimporter(egg_path)) - return Distribution.from_filename(egg_path, metadata=metadata) - - # FIXME: 'easy_install.install_egg' is too complex (11) - def install_egg(self, egg_path, tmpdir): # noqa: C901 - destination = os.path.join( - self.install_dir, - os.path.basename(egg_path), - ) - destination = os.path.abspath(destination) - if not self.dry_run: - 
ensure_directory(destination) - - dist = self.egg_distribution(egg_path) - if not samefile(egg_path, destination): - if os.path.isdir(destination) and not os.path.islink(destination): - dir_util.remove_tree(destination, dry_run=self.dry_run) - elif os.path.exists(destination): - self.execute( - os.unlink, - (destination,), - "Removing " + destination, - ) - try: - new_dist_is_zipped = False - if os.path.isdir(egg_path): - if egg_path.startswith(tmpdir): - f, m = shutil.move, "Moving" - else: - f, m = shutil.copytree, "Copying" - elif self.should_unzip(dist): - self.mkpath(destination) - f, m = self.unpack_and_compile, "Extracting" - else: - new_dist_is_zipped = True - if egg_path.startswith(tmpdir): - f, m = shutil.move, "Moving" - else: - f, m = shutil.copy2, "Copying" - self.execute( - f, - (egg_path, destination), - (m + " %s to %s") % ( - os.path.basename(egg_path), - os.path.dirname(destination) - ), - ) - update_dist_caches( - destination, - fix_zipimporter_caches=new_dist_is_zipped, - ) - except Exception: - update_dist_caches(destination, fix_zipimporter_caches=False) - raise - - self.add_output(destination) - return self.egg_distribution(destination) - - def install_exe(self, dist_filename, tmpdir): - # See if it's valid, get data - cfg = extract_wininst_cfg(dist_filename) - if cfg is None: - raise DistutilsError( - "%s is not a valid distutils Windows .exe" % dist_filename - ) - # Create a dummy distribution object until we build the real distro - dist = Distribution( - None, - project_name=cfg.get('metadata', 'name'), - version=cfg.get('metadata', 'version'), platform=get_platform(), - ) - - # Convert the .exe to an unpacked egg - egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg') - dist.location = egg_path - egg_tmp = egg_path + '.tmp' - _egg_info = os.path.join(egg_tmp, 'EGG-INFO') - pkg_inf = os.path.join(_egg_info, 'PKG-INFO') - ensure_directory(pkg_inf) # make sure EGG-INFO dir exists - dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX - self.exe_to_egg(dist_filename, egg_tmp) - - # Write EGG-INFO/PKG-INFO - if not os.path.exists(pkg_inf): - f = open(pkg_inf, 'w') - f.write('Metadata-Version: 1.0\n') - for k, v in cfg.items('metadata'): - if k != 'target_version': - f.write('%s: %s\n' % (k.replace('_', '-').title(), v)) - f.close() - script_dir = os.path.join(_egg_info, 'scripts') - # delete entry-point scripts to avoid duping - self.delete_blockers([ - os.path.join(script_dir, args[0]) - for args in ScriptWriter.get_args(dist) - ]) - # Build .egg file from tmpdir - bdist_egg.make_zipfile( - egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run, - ) - # install the .egg - return self.install_egg(egg_path, tmpdir) - - # FIXME: 'easy_install.exe_to_egg' is too complex (12) - def exe_to_egg(self, dist_filename, egg_tmp): # noqa: C901 - """Extract a bdist_wininst to the directories an egg would use""" - # Check for .pth file and set up prefix translations - prefixes = get_exe_prefixes(dist_filename) - to_compile = [] - native_libs = [] - top_level = {} - - def process(src, dst): - s = src.lower() - for old, new in prefixes: - if s.startswith(old): - src = new + src[len(old):] - parts = src.split('/') - dst = os.path.join(egg_tmp, *parts) - dl = dst.lower() - if dl.endswith('.pyd') or dl.endswith('.dll'): - parts[-1] = bdist_egg.strip_module(parts[-1]) - top_level[os.path.splitext(parts[0])[0]] = 1 - native_libs.append(src) - elif dl.endswith('.py') and old != 'SCRIPTS/': - top_level[os.path.splitext(parts[0])[0]] = 1 - to_compile.append(dst) - return dst - 
if not src.endswith('.pth'): - log.warn("WARNING: can't process %s", src) - return None - - # extract, tracking .pyd/.dll->native_libs and .py -> to_compile - unpack_archive(dist_filename, egg_tmp, process) - stubs = [] - for res in native_libs: - if res.lower().endswith('.pyd'): # create stubs for .pyd's - parts = res.split('/') - resource = parts[-1] - parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py' - pyfile = os.path.join(egg_tmp, *parts) - to_compile.append(pyfile) - stubs.append(pyfile) - bdist_egg.write_stub(resource, pyfile) - self.byte_compile(to_compile) # compile .py's - bdist_egg.write_safety_flag( - os.path.join(egg_tmp, 'EGG-INFO'), - bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag - - for name in 'top_level', 'native_libs': - if locals()[name]: - txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt') - if not os.path.exists(txt): - f = open(txt, 'w') - f.write('\n'.join(locals()[name]) + '\n') - f.close() - - def install_wheel(self, wheel_path, tmpdir): - wheel = Wheel(wheel_path) - assert wheel.is_compatible() - destination = os.path.join(self.install_dir, wheel.egg_name()) - destination = os.path.abspath(destination) - if not self.dry_run: - ensure_directory(destination) - if os.path.isdir(destination) and not os.path.islink(destination): - dir_util.remove_tree(destination, dry_run=self.dry_run) - elif os.path.exists(destination): - self.execute( - os.unlink, - (destination,), - "Removing " + destination, - ) - try: - self.execute( - wheel.install_as_egg, - (destination,), - ("Installing %s to %s") % ( - os.path.basename(wheel_path), - os.path.dirname(destination) - ), - ) - finally: - update_dist_caches(destination, fix_zipimporter_caches=False) - self.add_output(destination) - return self.egg_distribution(destination) - - __mv_warning = textwrap.dedent(""" - Because this distribution was installed --multi-version, before you can - import modules from this package in an application, you will need to - 'import pkg_resources' and then use a 'require()' call similar to one of - these examples, in order to select the desired version: - - pkg_resources.require("%(name)s") # latest installed version - pkg_resources.require("%(name)s==%(version)s") # this exact version - pkg_resources.require("%(name)s>=%(version)s") # this version or higher - """).lstrip() # noqa - - __id_warning = textwrap.dedent(""" - Note also that the installation directory must be on sys.path at runtime for - this to work. (e.g. by being the application's script directory, by being on - PYTHONPATH, or by being added to sys.path by your code.) - """) # noqa - - def installation_report(self, req, dist, what="Installed"): - """Helpful installation message for display to package users""" - msg = "\n%(what)s %(eggloc)s%(extras)s" - if self.multi_version and not self.no_report: - msg += '\n' + self.__mv_warning - if self.install_dir not in map(normalize_path, sys.path): - msg += '\n' + self.__id_warning - - eggloc = dist.location - name = dist.project_name - version = dist.version - extras = '' # TODO: self.report_extras(req, dist) - return msg % locals() - - __editable_msg = textwrap.dedent(""" - Extracted editable version of %(spec)s to %(dirname)s - - If it uses setuptools in its setup script, you can activate it in - "development" mode by going to that directory and running:: - - %(python)s setup.py develop - - See the setuptools documentation for the "develop" command for more info. 
- """).lstrip() # noqa - - def report_editable(self, spec, setup_script): - dirname = os.path.dirname(setup_script) - python = sys.executable - return '\n' + self.__editable_msg % locals() - - def run_setup(self, setup_script, setup_base, args): - sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg) - sys.modules.setdefault('distutils.command.egg_info', egg_info) - - args = list(args) - if self.verbose > 2: - v = 'v' * (self.verbose - 1) - args.insert(0, '-' + v) - elif self.verbose < 2: - args.insert(0, '-q') - if self.dry_run: - args.insert(0, '-n') - log.info( - "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args) - ) - try: - run_setup(setup_script, args) - except SystemExit as v: - raise DistutilsError( - "Setup script exited with %s" % (v.args[0],) - ) from v - - def build_and_install(self, setup_script, setup_base): - args = ['bdist_egg', '--dist-dir'] - - dist_dir = tempfile.mkdtemp( - prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script) - ) - try: - self._set_fetcher_options(os.path.dirname(setup_script)) - args.append(dist_dir) - - self.run_setup(setup_script, setup_base, args) - all_eggs = Environment([dist_dir]) - eggs = [] - for key in all_eggs: - for dist in all_eggs[key]: - eggs.append(self.install_egg(dist.location, setup_base)) - if not eggs and not self.dry_run: - log.warn("No eggs found in %s (setup script problem?)", - dist_dir) - return eggs - finally: - rmtree(dist_dir) - log.set_verbosity(self.verbose) # restore our log verbosity - - def _set_fetcher_options(self, base): - """ - When easy_install is about to run bdist_egg on a source dist, that - source dist might have 'setup_requires' directives, requiring - additional fetching. Ensure the fetcher options given to easy_install - are available to that command as well. - """ - # find the fetch options from easy_install and write them out - # to the setup.cfg file. - ei_opts = self.distribution.get_option_dict('easy_install').copy() - fetch_directives = ( - 'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts', - ) - fetch_options = {} - for key, val in ei_opts.items(): - if key not in fetch_directives: - continue - fetch_options[key] = val[1] - # create a settings dictionary suitable for `edit_config` - settings = dict(easy_install=fetch_options) - cfg_filename = os.path.join(base, 'setup.cfg') - setopt.edit_config(cfg_filename, settings) - - def update_pth(self, dist): # noqa: C901 # is too complex (11) # FIXME - if self.pth_file is None: - return - - for d in self.pth_file[dist.key]: # drop old entries - if not self.multi_version and d.location == dist.location: - continue - - log.info("Removing %s from easy-install.pth file", d) - self.pth_file.remove(d) - if d.location in self.shadow_path: - self.shadow_path.remove(d.location) - - if not self.multi_version: - if dist.location in self.pth_file.paths: - log.info( - "%s is already the active version in easy-install.pth", - dist, - ) - else: - log.info("Adding %s to easy-install.pth file", dist) - self.pth_file.add(dist) # add new entry - if dist.location not in self.shadow_path: - self.shadow_path.append(dist.location) - - if self.dry_run: - return - - self.pth_file.save() - - if dist.key != 'setuptools': - return - - # Ensure that setuptools itself never becomes unavailable! - # XXX should this check for latest version? 
- filename = os.path.join(self.install_dir, 'setuptools.pth') - if os.path.islink(filename): - os.unlink(filename) - with open(filename, 'wt') as f: - f.write(self.pth_file.make_relative(dist.location) + '\n') - - def unpack_progress(self, src, dst): - # Progress filter for unpacking - log.debug("Unpacking %s to %s", src, dst) - return dst # only unpack-and-compile skips files for dry run - - def unpack_and_compile(self, egg_path, destination): - to_compile = [] - to_chmod = [] - - def pf(src, dst): - if dst.endswith('.py') and not src.startswith('EGG-INFO/'): - to_compile.append(dst) - elif dst.endswith('.dll') or dst.endswith('.so'): - to_chmod.append(dst) - self.unpack_progress(src, dst) - return not self.dry_run and dst or None - - unpack_archive(egg_path, destination, pf) - self.byte_compile(to_compile) - if not self.dry_run: - for f in to_chmod: - mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755 - chmod(f, mode) - - def byte_compile(self, to_compile): - if sys.dont_write_bytecode: - return - - from distutils.util import byte_compile - - try: - # try to make the byte compile messages quieter - log.set_verbosity(self.verbose - 1) - - byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run) - if self.optimize: - byte_compile( - to_compile, optimize=self.optimize, force=1, - dry_run=self.dry_run, - ) - finally: - log.set_verbosity(self.verbose) # restore original verbosity - - __no_default_msg = textwrap.dedent(""" - bad install directory or PYTHONPATH - - You are attempting to install a package to a directory that is not - on PYTHONPATH and which Python does not read ".pth" files from. The - installation directory you specified (via --install-dir, --prefix, or - the distutils default setting) was: - - %s - - and your PYTHONPATH environment variable currently contains: - - %r - - Here are some of your options for correcting the problem: - - * You can choose a different installation directory, i.e., one that is - on PYTHONPATH or supports .pth files - - * You can add the installation directory to the PYTHONPATH environment - variable. (It must then also be on PYTHONPATH whenever you run - Python and want to use the package(s) you are installing.) - - * You can set up the installation directory to support ".pth" files by - using one of the approaches described here: - - https://setuptools.pypa.io/en/latest/deprecated/easy_install.html#custom-installation-locations - - - Please make the appropriate changes for your system and try again. 
- """).strip() - - def create_home_path(self): - """Create directories under ~.""" - if not self.user: - return - home = convert_path(os.path.expanduser("~")) - for path in only_strs(self.config_vars.values()): - if path.startswith(home) and not os.path.isdir(path): - self.debug_print("os.makedirs('%s', 0o700)" % path) - os.makedirs(path, 0o700) - - INSTALL_SCHEMES = dict( - posix=dict( - install_dir='$base/lib/python$py_version_short/site-packages', - script_dir='$base/bin', - ), - ) - - DEFAULT_SCHEME = dict( - install_dir='$base/Lib/site-packages', - script_dir='$base/Scripts', - ) - - def _expand(self, *attrs): - config_vars = self.get_finalized_command('install').config_vars - - if self.prefix: - # Set default install_dir/scripts from --prefix - config_vars = dict(config_vars) - config_vars['base'] = self.prefix - scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME) - for attr, val in scheme.items(): - if getattr(self, attr, None) is None: - setattr(self, attr, val) - - from distutils.util import subst_vars - - for attr in attrs: - val = getattr(self, attr) - if val is not None: - val = subst_vars(val, config_vars) - if os.name == 'posix': - val = os.path.expanduser(val) - setattr(self, attr, val) - - -def _pythonpath(): - items = os.environ.get('PYTHONPATH', '').split(os.pathsep) - return filter(None, items) - - -def get_site_dirs(): - """ - Return a list of 'site' dirs - """ - - sitedirs = [] - - # start with PYTHONPATH - sitedirs.extend(_pythonpath()) - - prefixes = [sys.prefix] - if sys.exec_prefix != sys.prefix: - prefixes.append(sys.exec_prefix) - for prefix in prefixes: - if not prefix: - continue - - if sys.platform in ('os2emx', 'riscos'): - sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) - elif os.sep == '/': - sitedirs.extend([ - os.path.join( - prefix, - "lib", - "python{}.{}".format(*sys.version_info), - "site-packages", - ), - os.path.join(prefix, "lib", "site-python"), - ]) - else: - sitedirs.extend([ - prefix, - os.path.join(prefix, "lib", "site-packages"), - ]) - if sys.platform != 'darwin': - continue - - # for framework builds *only* we add the standard Apple - # locations. 
Currently only per-user, but /Library and - # /Network/Library could be added too - if 'Python.framework' not in prefix: - continue - - home = os.environ.get('HOME') - if not home: - continue - - home_sp = os.path.join( - home, - 'Library', - 'Python', - '{}.{}'.format(*sys.version_info), - 'site-packages', - ) - sitedirs.append(home_sp) - lib_paths = get_path('purelib'), get_path('platlib') - - sitedirs.extend(s for s in lib_paths if s not in sitedirs) - - if site.ENABLE_USER_SITE: - sitedirs.append(site.USER_SITE) - - with contextlib.suppress(AttributeError): - sitedirs.extend(site.getsitepackages()) - - sitedirs = list(map(normalize_path, sitedirs)) - - return sitedirs - - -def expand_paths(inputs): # noqa: C901 # is too complex (11) # FIXME - """Yield sys.path directories that might contain "old-style" packages""" - - seen = {} - - for dirname in inputs: - dirname = normalize_path(dirname) - if dirname in seen: - continue - - seen[dirname] = 1 - if not os.path.isdir(dirname): - continue - - files = os.listdir(dirname) - yield dirname, files - - for name in files: - if not name.endswith('.pth'): - # We only care about the .pth files - continue - if name in ('easy-install.pth', 'setuptools.pth'): - # Ignore .pth files that we control - continue - - # Read the .pth file - f = open(os.path.join(dirname, name)) - lines = list(yield_lines(f)) - f.close() - - # Yield existing non-dupe, non-import directory lines from it - for line in lines: - if line.startswith("import"): - continue - - line = normalize_path(line.rstrip()) - if line in seen: - continue - - seen[line] = 1 - if not os.path.isdir(line): - continue - - yield line, os.listdir(line) - - -def extract_wininst_cfg(dist_filename): - """Extract configuration data from a bdist_wininst .exe - - Returns a configparser.RawConfigParser, or None - """ - f = open(dist_filename, 'rb') - try: - endrec = zipfile._EndRecData(f) - if endrec is None: - return None - - prepended = (endrec[9] - endrec[5]) - endrec[6] - if prepended < 12: # no wininst data here - return None - f.seek(prepended - 12) - - tag, cfglen, bmlen = struct.unpack("<iii", f.read(12)) - if tag not in (0x1234567A, 0x1234567B): - return None # not a valid tag - f.seek(prepended - (12 + cfglen)) - init = {'version': '', 'target_version': ''} - cfg = configparser.RawConfigParser(init) - try: - part = f.read(cfglen) - # Read up to the first null byte. - config = part.split(b'\0', 1)[0] - # Now the config is in unicode - config = config.decode(sys.getfilesystemencoding()) - cfg.read_string(config) - except configparser.Error: - return None - if not cfg.has_section('metadata') or not cfg.has_section('Setup'): - return None - return cfg - - finally: - f.close() - - -def get_exe_prefixes(exe_filename): - """Get exe->egg path translations for a given .exe file""" - - prefixes = [ - ('PURELIB/', ''), - ('PLATLIB/pywin32_system32', ''), - ('PLATLIB/', ''), - ('SCRIPTS/', 'EGG-INFO/scripts/'), - ('DATA/lib/site-packages', ''), - ] - z = zipfile.ZipFile(exe_filename) - try: - for info in z.infolist(): - name = info.filename - parts = name.split('/') - if len(parts) == 3 and parts[2] == 'PKG-INFO': - if parts[1].endswith('.egg-info'): - prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/')) - break - if len(parts) != 2 or not name.endswith('.pth'): - continue - if name.endswith('-nspkg.pth'): - continue - if parts[0].upper() in ('PURELIB', 'PLATLIB'): - contents = z.read(name).decode() - for pth in yield_lines(contents): - pth = pth.strip().replace('\\', '/') - if not pth.startswith('import'): - prefixes.append((('%s/%s/' % (parts[0], pth)), '')) - finally: - z.close() - prefixes = [(x.lower(), y) for x, y in prefixes] - prefixes.sort() - prefixes.reverse() - return prefixes - - -class PthDistributions(Environment): - """A .pth file with Distribution paths in it""" - - dirty = False - - def __init__(self, filename, sitedirs=()): - self.filename = filename - self.sitedirs = list(map(normalize_path, sitedirs)) - self.basedir = normalize_path(os.path.dirname(self.filename)) - self._load() - super().__init__([], None, None) - for path in yield_lines(self.paths): - list(map(self.add, find_distributions(path, True))) - - def _load(self): -
self.paths = [] - saw_import = False - seen = dict.fromkeys(self.sitedirs) - if os.path.isfile(self.filename): - f = open(self.filename, 'rt') - for line in f: - if line.startswith('import'): - saw_import = True - continue - path = line.rstrip() - self.paths.append(path) - if not path.strip() or path.strip().startswith('#'): - continue - # skip non-existent paths, in case somebody deleted a package - # manually, and duplicate paths as well - path = self.paths[-1] = normalize_path( - os.path.join(self.basedir, path) - ) - if not os.path.exists(path) or path in seen: - self.paths.pop() # skip it - self.dirty = True # we cleaned up, so we're dirty now :) - continue - seen[path] = 1 - f.close() - - if self.paths and not saw_import: - self.dirty = True # ensure anything we touch has import wrappers - while self.paths and not self.paths[-1].strip(): - self.paths.pop() - - def save(self): - """Write changed .pth file back to disk""" - if not self.dirty: - return - - rel_paths = list(map(self.make_relative, self.paths)) - if rel_paths: - log.debug("Saving %s", self.filename) - lines = self._wrap_lines(rel_paths) - data = '\n'.join(lines) + '\n' - - if os.path.islink(self.filename): - os.unlink(self.filename) - with open(self.filename, 'wt') as f: - f.write(data) - - elif os.path.exists(self.filename): - log.debug("Deleting empty %s", self.filename) - os.unlink(self.filename) - - self.dirty = False - - @staticmethod - def _wrap_lines(lines): - return lines - - def add(self, dist): - """Add `dist` to the distribution map""" - new_path = ( - dist.location not in self.paths and ( - dist.location not in self.sitedirs or - # account for '.' being in PYTHONPATH - dist.location == os.getcwd() - ) - ) - if new_path: - self.paths.append(dist.location) - self.dirty = True - Environment.add(self, dist) - - def remove(self, dist): - """Remove `dist` from the distribution map""" - while dist.location in self.paths: - self.paths.remove(dist.location) - self.dirty = True - Environment.remove(self, dist) - - def make_relative(self, path): - npath, last = os.path.split(normalize_path(path)) - baselen = len(self.basedir) - parts = [last] - sep = os.altsep == '/' and '/' or os.sep - while len(npath) >= baselen: - if npath == self.basedir: - parts.append(os.curdir) - parts.reverse() - return sep.join(parts) - npath, last = os.path.split(npath) - parts.append(last) - else: - return path - - -class RewritePthDistributions(PthDistributions): - @classmethod - def _wrap_lines(cls, lines): - yield cls.prelude - for line in lines: - yield line - yield cls.postlude - - prelude = _one_liner(""" - import sys - sys.__plen = len(sys.path) - """) - postlude = _one_liner(""" - import sys - new = sys.path[sys.__plen:] - del sys.path[sys.__plen:] - p = getattr(sys, '__egginsert', 0) - sys.path[p:p] = new - sys.__egginsert = p + len(new) - """) - - -if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': - PthDistributions = RewritePthDistributions - - -def _first_line_re(): - """ - Return a regular expression based on first_line_re suitable for matching - strings. - """ - if isinstance(first_line_re.pattern, str): - return first_line_re - - # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern. - return re.compile(first_line_re.pattern.decode()) - - -def auto_chmod(func, arg, exc): - if func in [os.unlink, os.remove] and os.name == 'nt': - chmod(arg, stat.S_IWRITE) - return func(arg) - et, ev, _ = sys.exc_info() - # TODO: This code doesn't make sense. What is it trying to do? 
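The `prelude`/`postlude` pair in `RewritePthDistributions` above wraps the saved .pth entries so that, at interpreter startup, the new entries are cut back out of `sys.path` and spliced in at a stable insertion point instead of being left at the end. Unrolled into one runnable snippet (the appended path is a placeholder):

    import sys

    sys.__plen = len(sys.path)          # prelude: remember where our entries begin
    sys.path.append("/opt/app/lib")     # stand-in for the .pth file's path lines
    new = sys.path[sys.__plen:]         # postlude: cut our entries back out...
    del sys.path[sys.__plen:]
    p = getattr(sys, "__egginsert", 0)
    sys.path[p:p] = new                 # ...and splice them in near the front
    sys.__egginsert = p + len(new)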
- raise (ev[0], ev[1] + (" %s %s" % (func, arg))) - - -def update_dist_caches(dist_path, fix_zipimporter_caches): - """ - Fix any globally cached `dist_path` related data - - `dist_path` should be a path of a newly installed egg distribution (zipped - or unzipped). - - sys.path_importer_cache contains finder objects that have been cached when - importing data from the original distribution. Any such finders need to be - cleared since the replacement distribution might be packaged differently, - e.g. a zipped egg distribution might get replaced with an unzipped egg - folder or vice versa. Having the old finders cached may then cause Python - to attempt loading modules from the replacement distribution using an - incorrect loader. - - zipimport.zipimporter objects are Python loaders charged with importing - data packaged inside zip archives. If stale loaders referencing the - original distribution, are left behind, they can fail to load modules from - the replacement distribution. E.g. if an old zipimport.zipimporter instance - is used to load data from a new zipped egg archive, it may cause the - operation to attempt to locate the requested data in the wrong location - - one indicated by the original distribution's zip archive directory - information. Such an operation may then fail outright, e.g. report having - read a 'bad local file header', or even worse, it may fail silently & - return invalid data. - - zipimport._zip_directory_cache contains cached zip archive directory - information for all existing zipimport.zipimporter instances and all such - instances connected to the same archive share the same cached directory - information. - - If asked, and the underlying Python implementation allows it, we can fix - all existing zipimport.zipimporter instances instead of having to track - them down and remove them one by one, by updating their shared cached zip - archive directory information. This, of course, assumes that the - replacement distribution is packaged as a zipped egg. - - If not asked to fix existing zipimport.zipimporter instances, we still do - our best to clear any remaining zipimport.zipimporter related cached data - that might somehow later get used when attempting to load data from the new - distribution and thus cause such load operations to fail. Note that when - tracking down such remaining stale data, we can not catch every conceivable - usage from here, and we clear only those that we know of and have found to - cause problems if left alive. Any remaining caches should be updated by - whomever is in charge of maintaining them, i.e. they should be ready to - handle us replacing their zip archives with new distributions at runtime. - - """ - # There are several other known sources of stale zipimport.zipimporter - # instances that we do not clear here, but might if ever given a reason to - # do so: - # * Global setuptools pkg_resources.working_set (a.k.a. 'master working - # set') may contain distributions which may in turn contain their - # zipimport.zipimporter loaders. - # * Several zipimport.zipimporter loaders held by local variables further - # up the function call stack when running the setuptools installation. - # * Already loaded modules may have their __loader__ attribute set to the - # exact loader instance used when importing them. Python 3.4 docs state - # that this information is intended mostly for introspection and so is - # not expected to cause us problems. 
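The function body that follows implements the first half of that docstring: stale finders for the replaced distribution are dropped from `sys.path_importer_cache`. A stripped-down sketch of that invalidation, with `os.path.normcase`/`realpath` standing in for `pkg_resources`' `normalize_path`:

    import os
    import sys

    def _norm(path):
        # Rough stand-in for pkg_resources.normalize_path.
        return os.path.normcase(os.path.realpath(path))

    def uncache_path_finders(dist_path):
        # Drop cached finders for dist_path and anything beneath it.
        root = _norm(dist_path)
        stale = [
            p for p in list(sys.path_importer_cache)
            if _norm(p) == root or _norm(p).startswith(root + os.sep)
        ]
        for p in stale:
            del sys.path_importer_cache[p]
        return stale

    uncache_path_finders("/opt/app/lib/example-1.0-py3.9.egg")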
- normalized_path = normalize_path(dist_path) - _uncache(normalized_path, sys.path_importer_cache) - if fix_zipimporter_caches: - _replace_zip_directory_cache_data(normalized_path) - else: - # Here, even though we do not want to fix existing and now stale - # zipimporter cache information, we still want to remove it. Related to - # Python's zip archive directory information cache, we clear each of - # its stale entries in two phases: - # 1. Clear the entry so attempting to access zip archive information - # via any existing stale zipimport.zipimporter instances fails. - # 2. Remove the entry from the cache so any newly constructed - # zipimport.zipimporter instances do not end up using old stale - # zip archive directory information. - # This whole stale data removal step does not seem strictly necessary, - # but has been left in because it was done before we started replacing - # the zip archive directory information cache content if possible, and - # there are no relevant unit tests that we can depend on to tell us if - # this is really needed. - _remove_and_clear_zip_directory_cache_data(normalized_path) - - -def _collect_zipimporter_cache_entries(normalized_path, cache): - """ - Return zipimporter cache entry keys related to a given normalized path. - - Alternative path spellings (e.g. those using different character case or - those using alternative path separators) related to the same path are - included. Any sub-path entries are included as well, i.e. those - corresponding to zip archives embedded in other zip archives. - - """ - result = [] - prefix_len = len(normalized_path) - for p in cache: - np = normalize_path(p) - if (np.startswith(normalized_path) and - np[prefix_len:prefix_len + 1] in (os.sep, '')): - result.append(p) - return result - - -def _update_zipimporter_cache(normalized_path, cache, updater=None): - """ - Update zipimporter cache data for a given normalized path. - - Any sub-path entries are processed as well, i.e. those corresponding to zip - archives embedded in other zip archives. - - Given updater is a callable taking a cache entry key and the original entry - (after already removing the entry from the cache), and expected to update - the entry and possibly return a new one to be inserted in its place. - Returning None indicates that the entry should not be replaced with a new - one. If no updater is given, the cache entries are simply removed without - any additional processing, the same as if the updater simply returned None. - - """ - for p in _collect_zipimporter_cache_entries(normalized_path, cache): - # N.B. pypy's custom zipimport._zip_directory_cache implementation does - # not support the complete dict interface: - # * Does not support item assignment, thus not allowing this function - # to be used only for removing existing cache entries. - # * Does not support the dict.pop() method, forcing us to use the - # get/del patterns instead. 
For more detailed information see the - # following links: - # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420 - # http://bit.ly/2h9itJX - old_entry = cache[p] - del cache[p] - new_entry = updater and updater(p, old_entry) - if new_entry is not None: - cache[p] = new_entry - - -def _uncache(normalized_path, cache): - _update_zipimporter_cache(normalized_path, cache) - - -def _remove_and_clear_zip_directory_cache_data(normalized_path): - def clear_and_remove_cached_zip_archive_directory_data(path, old_entry): - old_entry.clear() - - _update_zipimporter_cache( - normalized_path, zipimport._zip_directory_cache, - updater=clear_and_remove_cached_zip_archive_directory_data) - - -# PyPy Python implementation does not allow directly writing to the -# zipimport._zip_directory_cache and so prevents us from attempting to correct -# its content. The best we can do there is clear the problematic cache content -# and have PyPy repopulate it as needed. The downside is that if there are any -# stale zipimport.zipimporter instances laying around, attempting to use them -# will fail due to not having its zip archive directory information available -# instead of being automatically corrected to use the new correct zip archive -# directory information. -if '__pypy__' in sys.builtin_module_names: - _replace_zip_directory_cache_data = \ - _remove_and_clear_zip_directory_cache_data -else: - - def _replace_zip_directory_cache_data(normalized_path): - def replace_cached_zip_archive_directory_data(path, old_entry): - # N.B. In theory, we could load the zip directory information just - # once for all updated path spellings, and then copy it locally and - # update its contained path strings to contain the correct - # spelling, but that seems like a way too invasive move (this cache - # structure is not officially documented anywhere and could in - # theory change with new Python releases) for no significant - # benefit. - old_entry.clear() - zipimport.zipimporter(path) - old_entry.update(zipimport._zip_directory_cache[path]) - return old_entry - - _update_zipimporter_cache( - normalized_path, zipimport._zip_directory_cache, - updater=replace_cached_zip_archive_directory_data) - - -def is_python(text, filename=''): - "Is this string a valid Python script?" - try: - compile(text, filename, 'exec') - except (SyntaxError, TypeError): - return False - else: - return True - - -def is_sh(executable): - """Determine if the specified executable is a .sh (contains a #! line)""" - try: - with io.open(executable, encoding='latin-1') as fp: - magic = fp.read(2) - except (OSError, IOError): - return executable - return magic == '#!' - - -def nt_quote_arg(arg): - """Quote a command line argument according to Windows parsing rules""" - return subprocess.list2cmdline([arg]) - - -def is_python_script(script_text, filename): - """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. - """ - if filename.endswith('.py') or filename.endswith('.pyw'): - return True # extension says it's Python - if is_python(script_text, filename): - return True # it's syntactically valid Python - if script_text.startswith('#!'): - # It begins with a '#!' 
line, so check if 'python' is in it somewhere - return 'python' in script_text.splitlines()[0].lower() - - return False # Not any Python I can recognize - - -try: - from os import chmod as _chmod -except ImportError: - # Jython compatibility - def _chmod(*args): - pass - - -def chmod(path, mode): - log.debug("changing mode of %s to %o", path, mode) - try: - _chmod(path, mode) - except os.error as e: - log.debug("chmod failed: %s", e) - - -class CommandSpec(list): - """ - A command spec for a #! header, specified as a list of arguments akin to - those passed to Popen. - """ - - options = [] - split_args = dict() - - @classmethod - def best(cls): - """ - Choose the best CommandSpec class based on environmental conditions. - """ - return cls - - @classmethod - def _sys_executable(cls): - _default = os.path.normpath(sys.executable) - return os.environ.get('__PYVENV_LAUNCHER__', _default) - - @classmethod - def from_param(cls, param): - """ - Construct a CommandSpec from a parameter to build_scripts, which may - be None. - """ - if isinstance(param, cls): - return param - if isinstance(param, list): - return cls(param) - if param is None: - return cls.from_environment() - # otherwise, assume it's a string. - return cls.from_string(param) - - @classmethod - def from_environment(cls): - return cls([cls._sys_executable()]) - - @classmethod - def from_string(cls, string): - """ - Construct a command spec from a simple string representing a command - line parseable by shlex.split. - """ - items = shlex.split(string, **cls.split_args) - return cls(items) - - def install_options(self, script_text): - self.options = shlex.split(self._extract_options(script_text)) - cmdline = subprocess.list2cmdline(self) - if not isascii(cmdline): - self.options[:0] = ['-x'] - - @staticmethod - def _extract_options(orig_script): - """ - Extract any options from the first line of the script. - """ - first = (orig_script + '\n').splitlines()[0] - match = _first_line_re().match(first) - options = match.group(1) or '' if match else '' - return options.strip() - - def as_header(self): - return self._render(self + list(self.options)) - - @staticmethod - def _strip_quotes(item): - _QUOTES = '"\'' - for q in _QUOTES: - if item.startswith(q) and item.endswith(q): - return item[1:-1] - return item - - @staticmethod - def _render(items): - cmdline = subprocess.list2cmdline( - CommandSpec._strip_quotes(item.strip()) for item in items) - return '#!' + cmdline + '\n' - - -# For pbr compat; will be removed in a future version. -sys_executable = CommandSpec._sys_executable() - - -class WindowsCommandSpec(CommandSpec): - split_args = dict(posix=False) - - -class ScriptWriter: - """ - Encapsulates behavior around writing entry point scripts for console and - gui apps. 
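`CommandSpec` above models the `#!` header as an argv list and renders it with `subprocess.list2cmdline`. The round trip in miniature (the `-E` option is just an example):

    import shlex
    import subprocess
    import sys

    def shebang_header(executable=sys.executable, options=""):
        # Parse options the way CommandSpec.from_string does, then render
        # the argv list back into a '#!' line as as_header/_render do.
        items = [executable] + shlex.split(options)
        return "#!" + subprocess.list2cmdline(items) + "\n"

    print(shebang_header(options="-E"), end="")  # e.g. #!/usr/bin/python3 -E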
- """ - - template = textwrap.dedent(r""" - # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r - import re - import sys - - # for compatibility with easy_install; see #2198 - __requires__ = %(spec)r - - try: - from importlib.metadata import distribution - except ImportError: - try: - from importlib_metadata import distribution - except ImportError: - from pkg_resources import load_entry_point - - - def importlib_load_entry_point(spec, group, name): - dist_name, _, _ = spec.partition('==') - matches = ( - entry_point - for entry_point in distribution(dist_name).entry_points - if entry_point.group == group and entry_point.name == name - ) - return next(matches).load() - - - globals().setdefault('load_entry_point', importlib_load_entry_point) - - - if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)()) - """).lstrip() - - command_spec_class = CommandSpec - - @classmethod - def get_script_args(cls, dist, executable=None, wininst=False): - # for backward compatibility - warnings.warn("Use get_args", EasyInstallDeprecationWarning) - writer = (WindowsScriptWriter if wininst else ScriptWriter).best() - header = cls.get_script_header("", executable, wininst) - return writer.get_args(dist, header) - - @classmethod - def get_script_header(cls, script_text, executable=None, wininst=False): - # for backward compatibility - warnings.warn( - "Use get_header", EasyInstallDeprecationWarning, stacklevel=2) - if wininst: - executable = "python.exe" - return cls.get_header(script_text, executable) - - @classmethod - def get_args(cls, dist, header=None): - """ - Yield write_script() argument tuples for a distribution's - console_scripts and gui_scripts entry points. - """ - if header is None: - header = cls.get_header() - spec = str(dist.as_requirement()) - for type_ in 'console', 'gui': - group = type_ + '_scripts' - for name, ep in dist.get_entry_map(group).items(): - cls._ensure_safe_name(name) - script_text = cls.template % locals() - args = cls._get_script_args(type_, name, header, script_text) - for res in args: - yield res - - @staticmethod - def _ensure_safe_name(name): - """ - Prevent paths in *_scripts entry point names. - """ - has_path_sep = re.search(r'[\\/]', name) - if has_path_sep: - raise ValueError("Path separators not allowed in script names") - - @classmethod - def get_writer(cls, force_windows): - # for backward compatibility - warnings.warn("Use best", EasyInstallDeprecationWarning) - return WindowsScriptWriter.best() if force_windows else cls.best() - - @classmethod - def best(cls): - """ - Select the best ScriptWriter for this environment. - """ - if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'): - return WindowsScriptWriter.best() - else: - return cls - - @classmethod - def _get_script_args(cls, type_, name, header, script_text): - # Simply write the stub with no extension. - yield (name, header + script_text) - - @classmethod - def get_header(cls, script_text="", executable=None): - """Create a #! 
line, getting options (if any) from script_text""" - cmd = cls.command_spec_class.best().from_param(executable) - cmd.install_options(script_text) - return cmd.as_header() - - -class WindowsScriptWriter(ScriptWriter): - command_spec_class = WindowsCommandSpec - - @classmethod - def get_writer(cls): - # for backward compatibility - warnings.warn("Use best", EasyInstallDeprecationWarning) - return cls.best() - - @classmethod - def best(cls): - """ - Select the best ScriptWriter suitable for Windows - """ - writer_lookup = dict( - executable=WindowsExecutableLauncherWriter, - natural=cls, - ) - # for compatibility, use the executable launcher by default - launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable') - return writer_lookup[launcher] - - @classmethod - def _get_script_args(cls, type_, name, header, script_text): - "For Windows, add a .py extension" - ext = dict(console='.pya', gui='.pyw')[type_] - if ext not in os.environ['PATHEXT'].lower().split(';'): - msg = ( - "{ext} not listed in PATHEXT; scripts will not be " - "recognized as executables." - ).format(**locals()) - warnings.warn(msg, UserWarning) - old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe'] - old.remove(ext) - header = cls._adjust_header(type_, header) - blockers = [name + x for x in old] - yield name + ext, header + script_text, 't', blockers - - @classmethod - def _adjust_header(cls, type_, orig_header): - """ - Make sure 'pythonw' is used for gui and 'python' is used for - console (regardless of what sys.executable is). - """ - pattern = 'pythonw.exe' - repl = 'python.exe' - if type_ == 'gui': - pattern, repl = repl, pattern - pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE) - new_header = pattern_ob.sub(string=orig_header, repl=repl) - return new_header if cls._use_header(new_header) else orig_header - - @staticmethod - def _use_header(new_header): - """ - Should _adjust_header use the replaced header? - - On non-windows systems, always use. On - Windows systems, only use the replaced header if it resolves - to an executable on the system. - """ - clean_header = new_header[2:-1].strip('"') - return sys.platform != 'win32' or find_executable(clean_header) - - -class WindowsExecutableLauncherWriter(WindowsScriptWriter): - @classmethod - def _get_script_args(cls, type_, name, header, script_text): - """ - For Windows, add a .py extension and an .exe launcher - """ - if type_ == 'gui': - launcher_type = 'gui' - ext = '-script.pyw' - old = ['.pyw'] - else: - launcher_type = 'cli' - ext = '-script.py' - old = ['.py', '.pyc', '.pyo'] - hdr = cls._adjust_header(type_, header) - blockers = [name + x for x in old] - yield (name + ext, hdr + script_text, 't', blockers) - yield ( - name + '.exe', get_win_launcher(launcher_type), - 'b' # write in binary mode - ) - if not is_64bit(): - # install a manifest for the launcher to prevent Windows - # from detecting it as an installer (which it will for - # launchers like easy_install.exe). Consider only - # adding a manifest for launchers detected as installers. - # See Distribute #143 for details. - m_name = name + '.exe.manifest' - yield (m_name, load_launcher_manifest(name), 't') - - -# for backward-compatibility -get_script_args = ScriptWriter.get_script_args -get_script_header = ScriptWriter.get_script_header - - -def get_win_launcher(type): - """ - Load the Windows launcher (executable) suitable for launching a script. - - `type` should be either 'cli' or 'gui' - - Returns the executable as a byte string. 
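`_adjust_header` above forces `pythonw.exe` for gui scripts and `python.exe` for console scripts via a case-insensitive regex substitution. The same swap in isolation:

    import re

    def adjust_header(type_, orig_header):
        # gui scripts get pythonw.exe; console scripts get python.exe.
        pattern, repl = "pythonw.exe", "python.exe"
        if type_ == "gui":
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        return pattern_ob.sub(repl, orig_header)

    print(adjust_header("gui", "#!C:\\Python39\\python.exe\n"), end="")
    # -> #!C:\Python39\pythonw.exe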
- """ - launcher_fn = '%s.exe' % type - if is_64bit(): - if get_platform() == "win-arm64": - launcher_fn = launcher_fn.replace(".", "-arm64.") - else: - launcher_fn = launcher_fn.replace(".", "-64.") - else: - launcher_fn = launcher_fn.replace(".", "-32.") - return resource_string('setuptools', launcher_fn) - - -def load_launcher_manifest(name): - manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml') - return manifest.decode('utf-8') % vars() - - -def rmtree(path, ignore_errors=False, onerror=auto_chmod): - return shutil.rmtree(path, ignore_errors, onerror) - - -def current_umask(): - tmp = os.umask(0o022) - os.umask(tmp) - return tmp - - -def only_strs(values): - """ - Exclude non-str values. Ref #3063. - """ - return filter(lambda val: isinstance(val, str), values) - - -class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning): - """ - Warning for EasyInstall deprecations, bypassing suppression. - """ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/egg_info.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/egg_info.py deleted file mode 100755 index f221029..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/egg_info.py +++ /dev/null @@ -1,755 +0,0 @@ -"""setuptools.command.egg_info - -Create a distribution's .egg-info directory and contents""" - -from distutils.filelist import FileList as _FileList -from distutils.errors import DistutilsInternalError -from distutils.util import convert_path -from distutils import log -import distutils.errors -import distutils.filelist -import functools -import os -import re -import sys -import io -import warnings -import time -import collections - -from setuptools import Command -from setuptools.command.sdist import sdist -from setuptools.command.sdist import walk_revctrl -from setuptools.command.setopt import edit_config -from setuptools.command import bdist_egg -from pkg_resources import ( - parse_requirements, safe_name, parse_version, - safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) -import setuptools.unicode_utils as unicode_utils -from setuptools.glob import glob - -from setuptools.extern import packaging -from setuptools import SetuptoolsDeprecationWarning - - -def translate_pattern(glob): # noqa: C901 # is too complex (14) # FIXME - """ - Translate a file path glob like '*.txt' in to a regular expression. - This differs from fnmatch.translate which allows wildcards to match - directory separators. It also knows about '**/' which matches any number of - directories. - """ - pat = '' - - # This will split on '/' within [character classes]. This is deliberate. - chunks = glob.split(os.path.sep) - - sep = re.escape(os.sep) - valid_char = '[^%s]' % (sep,) - - for c, chunk in enumerate(chunks): - last_chunk = c == len(chunks) - 1 - - # Chunks that are a literal ** are globstars. They match anything. 
- if chunk == '**': - if last_chunk: - # Match anything if this is the last component - pat += '.*' - else: - # Match '(name/)*' - pat += '(?:%s+%s)*' % (valid_char, sep) - continue # Break here as the whole path component has been handled - - # Find any special characters in the remainder - i = 0 - chunk_len = len(chunk) - while i < chunk_len: - char = chunk[i] - if char == '*': - # Match any number of name characters - pat += valid_char + '*' - elif char == '?': - # Match a name character - pat += valid_char - elif char == '[': - # Character class - inner_i = i + 1 - # Skip initial !/] chars - if inner_i < chunk_len and chunk[inner_i] == '!': - inner_i = inner_i + 1 - if inner_i < chunk_len and chunk[inner_i] == ']': - inner_i = inner_i + 1 - - # Loop till the closing ] is found - while inner_i < chunk_len and chunk[inner_i] != ']': - inner_i = inner_i + 1 - - if inner_i >= chunk_len: - # Got to the end of the string without finding a closing ] - # Do not treat this as a matching group, but as a literal [ - pat += re.escape(char) - else: - # Grab the insides of the [brackets] - inner = chunk[i + 1:inner_i] - char_class = '' - - # Class negation - if inner[0] == '!': - char_class = '^' - inner = inner[1:] - - char_class += re.escape(inner) - pat += '[%s]' % (char_class,) - - # Skip to the end ] - i = inner_i - else: - pat += re.escape(char) - i += 1 - - # Join each chunk with the dir separator - if not last_chunk: - pat += sep - - pat += r'\Z' - return re.compile(pat, flags=re.MULTILINE | re.DOTALL) - - -class InfoCommon: - tag_build = None - tag_date = None - - @property - def name(self): - return safe_name(self.distribution.get_name()) - - def tagged_version(self): - return safe_version(self._maybe_tag(self.distribution.get_version())) - - def _maybe_tag(self, version): - """ - egg_info may be called more than once for a distribution, - in which case the version string already contains all tags. - """ - return ( - version if self.vtags and version.endswith(self.vtags) - else version + self.vtags - ) - - def tags(self): - version = '' - if self.tag_build: - version += self.tag_build - if self.tag_date: - version += time.strftime("-%Y%m%d") - return version - vtags = property(tags) - - -class egg_info(InfoCommon, Command): - description = "create a distribution's .egg-info directory" - - user_options = [ - ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)"), - ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"), - ('tag-build=', 'b', "Specify explicit tag to add to version number"), - ('no-date', 'D', "Don't include date stamp [default]"), - ] - - boolean_options = ['tag-date'] - negative_opt = { - 'no-date': 'tag-date', - } - - def initialize_options(self): - self.egg_base = None - self.egg_name = None - self.egg_info = None - self.egg_version = None - self.broken_egg_info = False - - #################################### - # allow the 'tag_svn_revision' to be detected and - # set, supporting sdists built on older Setuptools. - @property - def tag_svn_revision(self): - pass - - @tag_svn_revision.setter - def tag_svn_revision(self, value): - pass - #################################### - - def save_version_info(self, filename): - """ - Materialize the value of date into the - build tag. Install build keys in a deterministic order - to avoid arbitrary reordering on subsequent builds. 
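`translate_pattern` above differs from stdlib `fnmatch.translate` precisely in that its `*` cannot cross a directory separator. A quick demonstration of the distinction (the separator-safe regex is hand-written for illustration, not taken from the function):

    import fnmatch
    import re

    # fnmatch lets '*' swallow directory separators:
    print(re.match(fnmatch.translate("*.txt"), "docs/notes.txt") is not None)  # True

    # A separator-aware '*.txt', in the spirit of translate_pattern:
    sep_safe = re.compile(r"[^/]*\.txt\Z")
    print(sep_safe.match("notes.txt") is not None)       # True
    print(sep_safe.match("docs/notes.txt") is not None)  # False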
- """ - egg_info = collections.OrderedDict() - # follow the order these keys would have been added - # when PYTHONHASHSEED=0 - egg_info['tag_build'] = self.tags() - egg_info['tag_date'] = 0 - edit_config(filename, dict(egg_info=egg_info)) - - def finalize_options(self): - # Note: we need to capture the current value returned - # by `self.tagged_version()`, so we can later update - # `self.distribution.metadata.version` without - # repercussions. - self.egg_name = self.name - self.egg_version = self.tagged_version() - parsed_version = parse_version(self.egg_version) - - try: - is_version = isinstance(parsed_version, packaging.version.Version) - spec = ( - "%s==%s" if is_version else "%s===%s" - ) - list( - parse_requirements(spec % (self.egg_name, self.egg_version)) - ) - except ValueError as e: - raise distutils.errors.DistutilsOptionError( - "Invalid distribution name or version syntax: %s-%s" % - (self.egg_name, self.egg_version) - ) from e - - if self.egg_base is None: - dirs = self.distribution.package_dir - self.egg_base = (dirs or {}).get('', os.curdir) - - self.ensure_dirname('egg_base') - self.egg_info = to_filename(self.egg_name) + '.egg-info' - if self.egg_base != os.curdir: - self.egg_info = os.path.join(self.egg_base, self.egg_info) - if '-' in self.egg_name: - self.check_broken_egg_info() - - # Set package version for the benefit of dumber commands - # (e.g. sdist, bdist_wininst, etc.) - # - self.distribution.metadata.version = self.egg_version - - # If we bootstrapped around the lack of a PKG-INFO, as might be the - # case in a fresh checkout, make sure that any special tags get added - # to the version info - # - pd = self.distribution._patched_dist - if pd is not None and pd.key == self.egg_name.lower(): - pd._version = self.egg_version - pd._parsed_version = parse_version(self.egg_version) - self.distribution._patched_dist = None - - def write_or_delete_file(self, what, filename, data, force=False): - """Write `data` to `filename` or delete if empty - - If `data` is non-empty, this routine is the same as ``write_file()``. - If `data` is empty but not ``None``, this is the same as calling - ``delete_file(filename)`. If `data` is ``None``, then this is a no-op - unless `filename` exists, in which case a warning is issued about the - orphaned file (if `force` is false), or deleted (if `force` is true). - """ - if data: - self.write_file(what, filename, data) - elif os.path.exists(filename): - if data is None and not force: - log.warn( - "%s not set in setup(), but %s exists", what, filename - ) - return - else: - self.delete_file(filename) - - def write_file(self, what, filename, data): - """Write `data` to `filename` (if not a dry run) after announcing it - - `what` is used in a log message to identify what is being written - to the file. 
- """ - log.info("writing %s to %s", what, filename) - data = data.encode("utf-8") - if not self.dry_run: - f = open(filename, 'wb') - f.write(data) - f.close() - - def delete_file(self, filename): - """Delete `filename` (if not a dry run) after announcing it""" - log.info("deleting %s", filename) - if not self.dry_run: - os.unlink(filename) - - def run(self): - self.mkpath(self.egg_info) - os.utime(self.egg_info, None) - installer = self.distribution.fetch_build_egg - for ep in iter_entry_points('egg_info.writers'): - ep.require(installer=installer) - writer = ep.resolve() - writer(self, ep.name, os.path.join(self.egg_info, ep.name)) - - # Get rid of native_libs.txt if it was put there by older bdist_egg - nl = os.path.join(self.egg_info, "native_libs.txt") - if os.path.exists(nl): - self.delete_file(nl) - - self.find_sources() - - def find_sources(self): - """Generate SOURCES.txt manifest file""" - manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") - mm = manifest_maker(self.distribution) - mm.manifest = manifest_filename - mm.run() - self.filelist = mm.filelist - - def check_broken_egg_info(self): - bei = self.egg_name + '.egg-info' - if self.egg_base != os.curdir: - bei = os.path.join(self.egg_base, bei) - if os.path.exists(bei): - log.warn( - "-" * 78 + '\n' - "Note: Your current .egg-info directory has a '-' in its name;" - '\nthis will not work correctly with "setup.py develop".\n\n' - 'Please rename %s to %s to correct this problem.\n' + '-' * 78, - bei, self.egg_info - ) - self.broken_egg_info = self.egg_info - self.egg_info = bei # make it work for now - - -class FileList(_FileList): - # Implementations of the various MANIFEST.in commands - - def process_template_line(self, line): - # Parse the line: split it up, make sure the right number of words - # is there, and return the relevant words. 'action' is always - # defined: it's the first word of the line. Which of the other - # three are defined depends on the action; it'll be either - # patterns, (dir and patterns), or (dir_pattern). - (action, patterns, dir, dir_pattern) = self._parse_template_line(line) - - action_map = { - 'include': self.include, - 'exclude': self.exclude, - 'global-include': self.global_include, - 'global-exclude': self.global_exclude, - 'recursive-include': functools.partial( - self.recursive_include, dir, - ), - 'recursive-exclude': functools.partial( - self.recursive_exclude, dir, - ), - 'graft': self.graft, - 'prune': self.prune, - } - log_map = { - 'include': "warning: no files found matching '%s'", - 'exclude': ( - "warning: no previously-included files found " - "matching '%s'" - ), - 'global-include': ( - "warning: no files found matching '%s' " - "anywhere in distribution" - ), - 'global-exclude': ( - "warning: no previously-included files matching " - "'%s' found anywhere in distribution" - ), - 'recursive-include': ( - "warning: no files found matching '%s' " - "under directory '%s'" - ), - 'recursive-exclude': ( - "warning: no previously-included files matching " - "'%s' found under directory '%s'" - ), - 'graft': "warning: no directories found matching '%s'", - 'prune': "no previously-included directories found matching '%s'", - } - - try: - process_action = action_map[action] - except KeyError: - raise DistutilsInternalError( - "this cannot happen: invalid action '{action!s}'". - format(action=action), - ) - - # OK, now we know that the action is valid and we have the - # right number of words on the line for that action -- so we - # can proceed with minimal error-checking. 
- - action_is_recursive = action.startswith('recursive-') - if action in {'graft', 'prune'}: - patterns = [dir_pattern] - extra_log_args = (dir, ) if action_is_recursive else () - log_tmpl = log_map[action] - - self.debug_print( - ' '.join( - [action] + - ([dir] if action_is_recursive else []) + - patterns, - ) - ) - for pattern in patterns: - if not process_action(pattern): - log.warn(log_tmpl, pattern, *extra_log_args) - - def _remove_files(self, predicate): - """ - Remove all files from the file list that match the predicate. - Return True if any matching files were removed - """ - found = False - for i in range(len(self.files) - 1, -1, -1): - if predicate(self.files[i]): - self.debug_print(" removing " + self.files[i]) - del self.files[i] - found = True - return found - - def include(self, pattern): - """Include files that match 'pattern'.""" - found = [f for f in glob(pattern) if not os.path.isdir(f)] - self.extend(found) - return bool(found) - - def exclude(self, pattern): - """Exclude files that match 'pattern'.""" - match = translate_pattern(pattern) - return self._remove_files(match.match) - - def recursive_include(self, dir, pattern): - """ - Include all files anywhere in 'dir/' that match the pattern. - """ - full_pattern = os.path.join(dir, '**', pattern) - found = [f for f in glob(full_pattern, recursive=True) - if not os.path.isdir(f)] - self.extend(found) - return bool(found) - - def recursive_exclude(self, dir, pattern): - """ - Exclude any file anywhere in 'dir/' that match the pattern. - """ - match = translate_pattern(os.path.join(dir, '**', pattern)) - return self._remove_files(match.match) - - def graft(self, dir): - """Include all files from 'dir/'.""" - found = [ - item - for match_dir in glob(dir) - for item in distutils.filelist.findall(match_dir) - ] - self.extend(found) - return bool(found) - - def prune(self, dir): - """Filter out files from 'dir/'.""" - match = translate_pattern(os.path.join(dir, '**')) - return self._remove_files(match.match) - - def global_include(self, pattern): - """ - Include all files anywhere in the current directory that match the - pattern. This is very inefficient on large file trees. - """ - if self.allfiles is None: - self.findall() - match = translate_pattern(os.path.join('**', pattern)) - found = [f for f in self.allfiles if match.match(f)] - self.extend(found) - return bool(found) - - def global_exclude(self, pattern): - """ - Exclude all files anywhere that match the pattern. - """ - match = translate_pattern(os.path.join('**', pattern)) - return self._remove_files(match.match) - - def append(self, item): - if item.endswith('\r'): # Fix older sdists built on Windows - item = item[:-1] - path = convert_path(item) - - if self._safe_path(path): - self.files.append(path) - - def extend(self, paths): - self.files.extend(filter(self._safe_path, paths)) - - def _repair(self): - """ - Replace self.files with only safe paths - - Because some owners of FileList manipulate the underlying - ``files`` attribute directly, this method must be called to - repair those paths. 
- """ - self.files = list(filter(self._safe_path, self.files)) - - def _safe_path(self, path): - enc_warn = "'%s' not %s encodable -- skipping" - - # To avoid accidental trans-codings errors, first to unicode - u_path = unicode_utils.filesys_decode(path) - if u_path is None: - log.warn("'%s' in unexpected encoding -- skipping" % path) - return False - - # Must ensure utf-8 encodability - utf8_path = unicode_utils.try_encode(u_path, "utf-8") - if utf8_path is None: - log.warn(enc_warn, path, 'utf-8') - return False - - try: - # accept is either way checks out - if os.path.exists(u_path) or os.path.exists(utf8_path): - return True - # this will catch any encode errors decoding u_path - except UnicodeEncodeError: - log.warn(enc_warn, path, sys.getfilesystemencoding()) - - -class manifest_maker(sdist): - template = "MANIFEST.in" - - def initialize_options(self): - self.use_defaults = 1 - self.prune = 1 - self.manifest_only = 1 - self.force_manifest = 1 - - def finalize_options(self): - pass - - def run(self): - self.filelist = FileList() - if not os.path.exists(self.manifest): - self.write_manifest() # it must exist so it'll get in the list - self.add_defaults() - if os.path.exists(self.template): - self.read_template() - self.add_license_files() - self.prune_file_list() - self.filelist.sort() - self.filelist.remove_duplicates() - self.write_manifest() - - def _manifest_normalize(self, path): - path = unicode_utils.filesys_decode(path) - return path.replace(os.sep, '/') - - def write_manifest(self): - """ - Write the file list in 'self.filelist' to the manifest file - named by 'self.manifest'. - """ - self.filelist._repair() - - # Now _repairs should encodability, but not unicode - files = [self._manifest_normalize(f) for f in self.filelist.files] - msg = "writing manifest file '%s'" % self.manifest - self.execute(write_file, (self.manifest, files), msg) - - def warn(self, msg): - if not self._should_suppress_warning(msg): - sdist.warn(self, msg) - - @staticmethod - def _should_suppress_warning(msg): - """ - suppress missing-file warnings from sdist - """ - return re.match(r"standard file .*not found", msg) - - def add_defaults(self): - sdist.add_defaults(self) - self.filelist.append(self.template) - self.filelist.append(self.manifest) - rcfiles = list(walk_revctrl()) - if rcfiles: - self.filelist.extend(rcfiles) - elif os.path.exists(self.manifest): - self.read_manifest() - - if os.path.exists("setup.py"): - # setup.py should be included by default, even if it's not - # the script called to create the sdist - self.filelist.append("setup.py") - - ei_cmd = self.get_finalized_command('egg_info') - self.filelist.graft(ei_cmd.egg_info) - - def add_license_files(self): - license_files = self.distribution.metadata.license_files or [] - for lf in license_files: - log.info("adding license file '%s'", lf) - pass - self.filelist.extend(license_files) - - def prune_file_list(self): - build = self.get_finalized_command('build') - base_dir = self.distribution.get_fullname() - self.filelist.prune(build.build_base) - self.filelist.prune(base_dir) - sep = re.escape(os.sep) - self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep, - is_regex=1) - - def _safe_data_files(self, build_py): - """ - The parent class implementation of this method - (``sdist``) will try to include data files, which - might cause recursion problems when - ``include_package_data=True``. - - Therefore, avoid triggering any attempt of - analyzing/building the manifest again. 
- """ - if hasattr(build_py, 'get_data_files_without_manifest'): - return build_py.get_data_files_without_manifest() - - warnings.warn( - "Custom 'build_py' does not implement " - "'get_data_files_without_manifest'.\nPlease extend command classes" - " from setuptools instead of distutils.", - SetuptoolsDeprecationWarning - ) - return build_py.get_data_files() - - -def write_file(filename, contents): - """Create a file with the specified name and write 'contents' (a - sequence of strings without line terminators) to it. - """ - contents = "\n".join(contents) - - # assuming the contents has been vetted for utf-8 encoding - contents = contents.encode("utf-8") - - with open(filename, "wb") as f: # always write POSIX-style manifest - f.write(contents) - - -def write_pkg_info(cmd, basename, filename): - log.info("writing %s", filename) - if not cmd.dry_run: - metadata = cmd.distribution.metadata - metadata.version, oldver = cmd.egg_version, metadata.version - metadata.name, oldname = cmd.egg_name, metadata.name - - try: - # write unescaped data to PKG-INFO, so older pkg_resources - # can still parse it - metadata.write_pkg_info(cmd.egg_info) - finally: - metadata.name, metadata.version = oldname, oldver - - safe = getattr(cmd.distribution, 'zip_safe', None) - - bdist_egg.write_safety_flag(cmd.egg_info, safe) - - -def warn_depends_obsolete(cmd, basename, filename): - if os.path.exists(filename): - log.warn( - "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" - "Use the install_requires/extras_require setup() args instead." - ) - - -def _write_requirements(stream, reqs): - lines = yield_lines(reqs or ()) - - def append_cr(line): - return line + '\n' - lines = map(append_cr, lines) - stream.writelines(lines) - - -def write_requirements(cmd, basename, filename): - dist = cmd.distribution - data = io.StringIO() - _write_requirements(data, dist.install_requires) - extras_require = dist.extras_require or {} - for extra in sorted(extras_require): - data.write('\n[{extra}]\n'.format(**vars())) - _write_requirements(data, extras_require[extra]) - cmd.write_or_delete_file("requirements", filename, data.getvalue()) - - -def write_setup_requirements(cmd, basename, filename): - data = io.StringIO() - _write_requirements(data, cmd.distribution.setup_requires) - cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) - - -def write_toplevel_names(cmd, basename, filename): - pkgs = dict.fromkeys( - [ - k.split('.', 1)[0] - for k in cmd.distribution.iter_distribution_names() - ] - ) - cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n') - - -def overwrite_arg(cmd, basename, filename): - write_arg(cmd, basename, filename, True) - - -def write_arg(cmd, basename, filename, force=False): - argname = os.path.splitext(basename)[0] - value = getattr(cmd.distribution, argname, None) - if value is not None: - value = '\n'.join(value) + '\n' - cmd.write_or_delete_file(argname, filename, value, force) - - -def write_entries(cmd, basename, filename): - ep = cmd.distribution.entry_points - - if isinstance(ep, str) or ep is None: - data = ep - elif ep is not None: - data = [] - for section, contents in sorted(ep.items()): - if not isinstance(contents, str): - contents = EntryPoint.parse_group(section, contents) - contents = '\n'.join(sorted(map(str, contents.values()))) - data.append('[%s]\n%s\n\n' % (section, contents)) - data = ''.join(data) - - cmd.write_or_delete_file('entry points', filename, data, True) - - -def get_pkg_info_revision(): - """ - Get a -r### off of PKG-INFO 
Version in case this is an sdist of - a subversion revision. - """ - warnings.warn( - "get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning) - if os.path.exists('PKG-INFO'): - with io.open('PKG-INFO') as f: - for line in f: - match = re.match(r"Version:.*-r(\d+)\s*$", line) - if match: - return int(match.group(1)) - return 0 - - -class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning): - """Deprecated behavior warning for EggInfo, bypassing suppression.""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install.py deleted file mode 100755 index 35e54d2..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install.py +++ /dev/null @@ -1,132 +0,0 @@ -from distutils.errors import DistutilsArgError -import inspect -import glob -import warnings -import platform -import distutils.command.install as orig - -import setuptools - -# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for -# now. See https://github.com/pypa/setuptools/issues/199/ -_install = orig.install - - -class install(orig.install): - """Use easy_install to install the package, w/dependencies""" - - user_options = orig.install.user_options + [ - ('old-and-unmanageable', None, "Try not to use this!"), - ('single-version-externally-managed', None, - "used by system package builders to create 'flat' eggs"), - ] - boolean_options = orig.install.boolean_options + [ - 'old-and-unmanageable', 'single-version-externally-managed', - ] - new_commands = [ - ('install_egg_info', lambda self: True), - ('install_scripts', lambda self: True), - ] - _nc = dict(new_commands) - - def initialize_options(self): - - warnings.warn( - "setup.py install is deprecated. " - "Use build and pip and other standards-based tools.", - setuptools.SetuptoolsDeprecationWarning, - ) - - orig.install.initialize_options(self) - self.old_and_unmanageable = None - self.single_version_externally_managed = None - - def finalize_options(self): - orig.install.finalize_options(self) - if self.root: - self.single_version_externally_managed = True - elif self.single_version_externally_managed: - if not self.root and not self.record: - raise DistutilsArgError( - "You must specify --record or --root when building system" - " packages" - ) - - def handle_extra_path(self): - if self.root or self.single_version_externally_managed: - # explicit backward-compatibility mode, allow extra_path to work - return orig.install.handle_extra_path(self) - - # Ignore extra_path when installing an egg (or being run by another - # command without --root or --single-version-externally-managed - self.path_file = None - self.extra_dirs = '' - - def run(self): - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return orig.install.run(self) - - if not self._called_from_setup(inspect.currentframe()): - # Run in backward-compatibility mode to support bdist_* commands. - orig.install.run(self) - else: - self.do_egg_install() - - @staticmethod - def _called_from_setup(run_frame): - """ - Attempt to detect whether run() was called from setup() or by another - command. If called by setup(), the parent caller will be the - 'run_command' method in 'distutils.dist', and *its* caller will be - the 'run_commands' method. 
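`_called_from_setup`, continuing below, decides whether `run()` was invoked by distutils' `run_commands` by inspecting the call stack two frames up. A generic version of that probe (the module/function names are parameters here, not setuptools API):

    import inspect

    def called_from(module_name, function_name):
        # True when the caller two frames above this one matches.
        frame = inspect.currentframe()
        try:
            outer = inspect.getouterframes(frame)
            if len(outer) < 3:
                return False  # call stack unavailable or too shallow
            caller = outer[2].frame
            info = inspect.getframeinfo(caller)
            return (
                caller.f_globals.get("__name__", "") == module_name
                and info.function == function_name
            )
        finally:
            del frame  # avoid a reference cycle with the current frame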
If called any other way, the - immediate caller *might* be 'run_command', but it won't have been - called by 'run_commands'. Return True in that case or if a call stack - is unavailable. Return False otherwise. - """ - if run_frame is None: - msg = "Call stack not available. bdist_* commands may fail." - warnings.warn(msg) - if platform.python_implementation() == 'IronPython': - msg = "For best results, pass -X:Frames to enable call stack." - warnings.warn(msg) - return True - res = inspect.getouterframes(run_frame)[2] - caller, = res[:1] - info = inspect.getframeinfo(caller) - caller_module = caller.f_globals.get('__name__', '') - return ( - caller_module == 'distutils.dist' - and info.function == 'run_commands' - ) - - def do_egg_install(self): - - easy_install = self.distribution.get_command_class('easy_install') - - cmd = easy_install( - self.distribution, args="x", root=self.root, record=self.record, - ) - cmd.ensure_finalized() # finalize before bdist_egg munges install cmd - cmd.always_copy_from = '.' # make sure local-dir eggs get installed - - # pick up setup-dir .egg files only: no .egg-info - cmd.package_index.scan(glob.glob('*.egg')) - - self.run_command('bdist_egg') - args = [self.distribution.get_command_obj('bdist_egg').egg_output] - - if setuptools.bootstrap_install_from: - # Bootstrap self-installation of setuptools - args.insert(0, setuptools.bootstrap_install_from) - - cmd.args = args - cmd.run(show_deprecation=False) - setuptools.bootstrap_install_from = None - - -# XXX Python 3.1 doesn't see _nc if this is inside the class -install.sub_commands = ( - [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] + - install.new_commands -) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_egg_info.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_egg_info.py deleted file mode 100755 index edc4718..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_egg_info.py +++ /dev/null @@ -1,62 +0,0 @@ -from distutils import log, dir_util -import os - -from setuptools import Command -from setuptools import namespaces -from setuptools.archive_util import unpack_archive -import pkg_resources - - -class install_egg_info(namespaces.Installer, Command): - """Install an .egg-info directory for the package""" - - description = "Install an .egg-info directory for the package" - - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ] - - def initialize_options(self): - self.install_dir = None - - def finalize_options(self): - self.set_undefined_options('install_lib', - ('install_dir', 'install_dir')) - ei_cmd = self.get_finalized_command("egg_info") - basename = pkg_resources.Distribution( - None, None, ei_cmd.egg_name, ei_cmd.egg_version - ).egg_name() + '.egg-info' - self.source = ei_cmd.egg_info - self.target = os.path.join(self.install_dir, basename) - self.outputs = [] - - def run(self): - self.run_command('egg_info') - if os.path.isdir(self.target) and not os.path.islink(self.target): - dir_util.remove_tree(self.target, dry_run=self.dry_run) - elif os.path.exists(self.target): - self.execute(os.unlink, (self.target,), "Removing " + self.target) - if not self.dry_run: - pkg_resources.ensure_directory(self.target) - self.execute( - self.copytree, (), "Copying %s to %s" % (self.source, self.target) - ) - self.install_namespaces() - - def get_outputs(self): - return self.outputs - - def copytree(self): - # 
Copy the .egg-info tree to site-packages - def skimmer(src, dst): - # filter out source-control directories; note that 'src' is always - # a '/'-separated path, regardless of platform. 'dst' is a - # platform-specific path. - for skip in '.svn/', 'CVS/': - if src.startswith(skip) or '/' + skip in src: - return None - self.outputs.append(dst) - log.debug("Copying %s to %s", src, dst) - return dst - - unpack_archive(self.source, self.target, skimmer) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_lib.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_lib.py deleted file mode 100755 index 2e9d875..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_lib.py +++ /dev/null @@ -1,122 +0,0 @@ -import os -import sys -from itertools import product, starmap -import distutils.command.install_lib as orig - - -class install_lib(orig.install_lib): - """Don't add compiled flags to filenames of non-Python files""" - - def run(self): - self.build() - outfiles = self.install() - if outfiles is not None: - # always compile, in case we have any extension stubs to deal with - self.byte_compile(outfiles) - - def get_exclusions(self): - """ - Return a collections.Sized collections.Container of paths to be - excluded for single_version_externally_managed installations. - """ - all_packages = ( - pkg - for ns_pkg in self._get_SVEM_NSPs() - for pkg in self._all_packages(ns_pkg) - ) - - excl_specs = product(all_packages, self._gen_exclusion_paths()) - return set(starmap(self._exclude_pkg_path, excl_specs)) - - def _exclude_pkg_path(self, pkg, exclusion_path): - """ - Given a package name and exclusion path within that package, - compute the full exclusion path. - """ - parts = pkg.split('.') + [exclusion_path] - return os.path.join(self.install_dir, *parts) - - @staticmethod - def _all_packages(pkg_name): - """ - >>> list(install_lib._all_packages('foo.bar.baz')) - ['foo.bar.baz', 'foo.bar', 'foo'] - """ - while pkg_name: - yield pkg_name - pkg_name, sep, child = pkg_name.rpartition('.') - - def _get_SVEM_NSPs(self): - """ - Get namespace packages (list) but only for - single_version_externally_managed installations and empty otherwise. - """ - # TODO: is it necessary to short-circuit here? i.e. what's the cost - # if get_finalized_command is called even when namespace_packages is - # False? - if not self.distribution.namespace_packages: - return [] - - install_cmd = self.get_finalized_command('install') - svem = install_cmd.single_version_externally_managed - - return self.distribution.namespace_packages if svem else [] - - @staticmethod - def _gen_exclusion_paths(): - """ - Generate file paths to be excluded for namespace packages (bytecode - cache files). - """ - # always exclude the package module itself - yield '__init__.py' - - yield '__init__.pyc' - yield '__init__.pyo' - - if not hasattr(sys, 'implementation'): - return - - base = os.path.join( - '__pycache__', '__init__.' 
+ sys.implementation.cache_tag) - yield base + '.pyc' - yield base + '.pyo' - yield base + '.opt-1.pyc' - yield base + '.opt-2.pyc' - - def copy_tree( - self, infile, outfile, - preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1 - ): - assert preserve_mode and preserve_times and not preserve_symlinks - exclude = self.get_exclusions() - - if not exclude: - return orig.install_lib.copy_tree(self, infile, outfile) - - # Exclude namespace package __init__.py* files from the output - - from setuptools.archive_util import unpack_directory - from distutils import log - - outfiles = [] - - def pf(src, dst): - if dst in exclude: - log.warn("Skipping installation of %s (namespace package)", - dst) - return False - - log.info("copying %s -> %s", src, os.path.dirname(dst)) - outfiles.append(dst) - return dst - - unpack_directory(infile, outfile, pf) - return outfiles - - def get_outputs(self): - outputs = orig.install_lib.get_outputs(self) - exclude = self.get_exclusions() - if exclude: - return [f for f in outputs if f not in exclude] - return outputs diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_scripts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_scripts.py deleted file mode 100755 index 9cd8eb0..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/install_scripts.py +++ /dev/null @@ -1,69 +0,0 @@ -from distutils import log -import distutils.command.install_scripts as orig -from distutils.errors import DistutilsModuleError -import os -import sys - -from pkg_resources import Distribution, PathMetadata, ensure_directory - - -class install_scripts(orig.install_scripts): - """Do normal script install, plus any egg_info wrapper scripts""" - - def initialize_options(self): - orig.install_scripts.initialize_options(self) - self.no_ep = False - - def run(self): - import setuptools.command.easy_install as ei - - self.run_command("egg_info") - if self.distribution.scripts: - orig.install_scripts.run(self) # run first to set up self.outfiles - else: - self.outfiles = [] - if self.no_ep: - # don't install entry point scripts into .egg file! - return - - ei_cmd = self.get_finalized_command("egg_info") - dist = Distribution( - ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), - ei_cmd.egg_name, ei_cmd.egg_version, - ) - bs_cmd = self.get_finalized_command('build_scripts') - exec_param = getattr(bs_cmd, 'executable', None) - try: - bw_cmd = self.get_finalized_command("bdist_wininst") - is_wininst = getattr(bw_cmd, '_is_running', False) - except (ImportError, DistutilsModuleError): - is_wininst = False - writer = ei.ScriptWriter - if is_wininst: - exec_param = "python.exe" - writer = ei.WindowsScriptWriter - if exec_param == sys.executable: - # In case the path to the Python executable contains a space, wrap - # it so it's not split up. 
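
A side note on the list-wrapping step here: a script header built from a bare string is later whitespace-split, which breaks interpreter paths containing spaces. A minimal sketch of the failure mode, using only the standard library and a hypothetical path:

    import shlex

    spaced = "C:/Program Files/Python39/python.exe"
    # A bare string is split on whitespace when parsed as a command line:
    print(shlex.split(spaced))   # ['C:/Program', 'Files/Python39/python.exe']
    # Pre-wrapping it in a list keeps the path as one argv token:
    print([spaced])              # ['C:/Program Files/Python39/python.exe']
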
- exec_param = [exec_param] - # resolve the writer to the environment - writer = writer.best() - cmd = writer.command_spec_class.best().from_param(exec_param) - for args in writer.get_args(dist, cmd.as_header()): - self.write_script(*args) - - def write_script(self, script_name, contents, mode="t", *ignored): - """Write an executable file to the scripts directory""" - from setuptools.command.easy_install import chmod, current_umask - - log.info("Installing %s script to %s", script_name, self.install_dir) - target = os.path.join(self.install_dir, script_name) - self.outfiles.append(target) - - mask = current_umask() - if not self.dry_run: - ensure_directory(target) - f = open(target, "w" + mode) - f.write(contents) - f.close() - chmod(target, 0o777 - mask) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/launcher manifest.xml b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/launcher manifest.xml deleted file mode 100644 index 5972a96..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/launcher manifest.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/py36compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/py36compat.py deleted file mode 100755 index 343547a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/py36compat.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -from glob import glob -from distutils.util import convert_path -from distutils.command import sdist - - -class sdist_add_defaults: - """ - Mix-in providing forward-compatibility for functionality as found in - distutils on Python 3.7. - - Do not edit the code in this class except to update functionality - as implemented in distutils. Instead, override in the subclass. - """ - - def add_defaults(self): - """Add all the default files to self.filelist: - - README or README.txt - - setup.py - - test/test*.py - - all pure Python modules mentioned in setup script - - all files pointed by package_data (build_py) - - all files defined in data_files. - - all files defined as scripts. - - all C sources listed as part of extensions or C libraries - in the setup script (doesn't catch C headers!) - Warns if (README or README.txt) or setup.py are missing; everything - else is optional. 
- """ - self._add_defaults_standards() - self._add_defaults_optional() - self._add_defaults_python() - self._add_defaults_data_files() - self._add_defaults_ext() - self._add_defaults_c_libs() - self._add_defaults_scripts() - - @staticmethod - def _cs_path_exists(fspath): - """ - Case-sensitive path existence check - - >>> sdist_add_defaults._cs_path_exists(__file__) - True - >>> sdist_add_defaults._cs_path_exists(__file__.upper()) - False - """ - if not os.path.exists(fspath): - return False - # make absolute so we always have a directory - abspath = os.path.abspath(fspath) - directory, filename = os.path.split(abspath) - return filename in os.listdir(directory) - - def _add_defaults_standards(self): - standards = [self.READMES, self.distribution.script_name] - for fn in standards: - if isinstance(fn, tuple): - alts = fn - got_it = False - for fn in alts: - if self._cs_path_exists(fn): - got_it = True - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if self._cs_path_exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - def _add_defaults_optional(self): - optional = ['test/test*.py', 'setup.cfg'] - for pattern in optional: - files = filter(os.path.isfile, glob(pattern)) - self.filelist.extend(files) - - def _add_defaults_python(self): - # build_py is used to get: - # - python modules - # - files defined in package_data - build_py = self.get_finalized_command('build_py') - - # getting python files - if self.distribution.has_pure_modules(): - self.filelist.extend(build_py.get_source_files()) - - # getting package_data files - # (computed in build_py.data_files by build_py.finalize_options) - for pkg, src_dir, build_dir, filenames in build_py.data_files: - for filename in filenames: - self.filelist.append(os.path.join(src_dir, filename)) - - def _add_defaults_data_files(self): - # getting distribution.data_files - if self.distribution.has_data_files(): - for item in self.distribution.data_files: - if isinstance(item, str): - # plain file - item = convert_path(item) - if os.path.isfile(item): - self.filelist.append(item) - else: - # a (dirname, filenames) tuple - dirname, filenames = item - for f in filenames: - f = convert_path(f) - if os.path.isfile(f): - self.filelist.append(f) - - def _add_defaults_ext(self): - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - def _add_defaults_c_libs(self): - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - def _add_defaults_scripts(self): - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) - - -if hasattr(sdist.sdist, '_add_defaults_standards'): - # disable the functionality already available upstream - class sdist_add_defaults: # noqa - pass diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/register.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/register.py deleted file mode 100755 index b8266b9..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/register.py +++ /dev/null @@ -1,18 +0,0 @@ -from distutils import log -import distutils.command.register as orig - -from 
setuptools.errors import RemovedCommandError - - -class register(orig.register): - """Formerly used to register packages on PyPI.""" - - def run(self): - msg = ( - "The register command has been removed, use twine to upload " - + "instead (https://pypi.org/p/twine)" - ) - - self.announce("ERROR: " + msg, log.ERROR) - - raise RemovedCommandError(msg) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/rotate.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/rotate.py deleted file mode 100755 index 74795ba..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/rotate.py +++ /dev/null @@ -1,64 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsOptionError -import os -import shutil - -from setuptools import Command - - -class rotate(Command): - """Delete older distributions""" - - description = "delete older distributions, keeping N newest files" - user_options = [ - ('match=', 'm', "patterns to match (required)"), - ('dist-dir=', 'd', "directory where the distributions are"), - ('keep=', 'k', "number of matching distributions to keep"), - ] - - boolean_options = [] - - def initialize_options(self): - self.match = None - self.dist_dir = None - self.keep = None - - def finalize_options(self): - if self.match is None: - raise DistutilsOptionError( - "Must specify one or more (comma-separated) match patterns " - "(e.g. '.zip' or '.egg')" - ) - if self.keep is None: - raise DistutilsOptionError("Must specify number of files to keep") - try: - self.keep = int(self.keep) - except ValueError as e: - raise DistutilsOptionError("--keep must be an integer") from e - if isinstance(self.match, str): - self.match = [ - convert_path(p.strip()) for p in self.match.split(',') - ] - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - - def run(self): - self.run_command("egg_info") - from glob import glob - - for pattern in self.match: - pattern = self.distribution.get_name() + '*' + pattern - files = glob(os.path.join(self.dist_dir, pattern)) - files = [(os.path.getmtime(f), f) for f in files] - files.sort() - files.reverse() - - log.info("%d file(s) matching %s", len(files), pattern) - files = files[self.keep:] - for (t, f) in files: - log.info("Deleting %s", f) - if not self.dry_run: - if os.path.isdir(f): - shutil.rmtree(f) - else: - os.unlink(f) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/saveopts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/saveopts.py deleted file mode 100755 index 611cec5..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/saveopts.py +++ /dev/null @@ -1,22 +0,0 @@ -from setuptools.command.setopt import edit_config, option_base - - -class saveopts(option_base): - """Save command-line options to a file""" - - description = "save supplied options to setup.cfg or other config file" - - def run(self): - dist = self.distribution - settings = {} - - for cmd in dist.command_options: - - if cmd == 'saveopts': - continue # don't save our own options! 
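
The `settings` mapping initialized above and filled by the loop that follows is keyed by command name, with one dict of options per command. A hedged sketch of the shape handed to `edit_config` (hypothetical commands and values, illustrative only):

    # Illustrative only: possible contents of `settings` after the loop.
    settings = {
        "sdist": {"formats": "gztar"},
        "bdist_egg": {"dist_dir": "custom_dist"},
    }
    # edit_config (defined in setopt.py, further below) persists this as INI:
    #
    #   [sdist]
    #   formats = gztar
    #
    #   [bdist_egg]
    #   dist_dir = custom_dist
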
- - for opt, (src, val) in dist.get_option_dict(cmd).items(): - if src == "command line": - settings.setdefault(cmd, {})[opt] = val - - edit_config(self.filename, settings, self.dry_run) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/sdist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/sdist.py deleted file mode 100755 index 0285b69..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/sdist.py +++ /dev/null @@ -1,196 +0,0 @@ -from distutils import log -import distutils.command.sdist as orig -import os -import sys -import io -import contextlib - -from .py36compat import sdist_add_defaults - -import pkg_resources - -_default_revctrl = list - - -def walk_revctrl(dirname=''): - """Find all files under revision control""" - for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): - for item in ep.load()(dirname): - yield item - - -class sdist(sdist_add_defaults, orig.sdist): - """Smart sdist that finds anything supported by revision control""" - - user_options = [ - ('formats=', None, - "formats for source distribution (comma-separated list)"), - ('keep-temp', 'k', - "keep the distribution tree around after creating " + - "archive file(s)"), - ('dist-dir=', 'd', - "directory to put the source distribution archive(s) in " - "[default: dist]"), - ('owner=', 'u', - "Owner name used when creating a tar file [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file [default: current group]"), - ] - - negative_opt = {} - - README_EXTENSIONS = ['', '.rst', '.txt', '.md'] - READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS) - - def run(self): - self.run_command('egg_info') - ei_cmd = self.get_finalized_command('egg_info') - self.filelist = ei_cmd.filelist - self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt')) - self.check_readme() - - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - self.make_distribution() - - dist_files = getattr(self.distribution, 'dist_files', []) - for file in self.archive_files: - data = ('sdist', '', file) - if data not in dist_files: - dist_files.append(data) - - def initialize_options(self): - orig.sdist.initialize_options(self) - - self._default_to_gztar() - - def _default_to_gztar(self): - # only needed on Python prior to 3.6. 
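
The early return below leans on Python's element-wise tuple comparison against `sys.version_info`, so the method is a no-op on any interpreter at or past 3.6.0b1; for example:

    import sys

    # Comparison stops at the first differing element, e.g.
    # (3, 8, 10, 'final', 0) >= (3, 6, 0, 'beta', 1) is True.
    print(tuple(sys.version_info))
    print(sys.version_info >= (3, 6, 0, "beta", 1))
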
- if sys.version_info >= (3, 6, 0, 'beta', 1): - return - self.formats = ['gztar'] - - def make_distribution(self): - """ - Workaround for #516 - """ - with self._remove_os_link(): - orig.sdist.make_distribution(self) - - @staticmethod - @contextlib.contextmanager - def _remove_os_link(): - """ - In a context, remove and restore os.link if it exists - """ - - class NoValue: - pass - - orig_val = getattr(os, 'link', NoValue) - try: - del os.link - except Exception: - pass - try: - yield - finally: - if orig_val is not NoValue: - setattr(os, 'link', orig_val) - - def _add_defaults_optional(self): - super()._add_defaults_optional() - if os.path.isfile('pyproject.toml'): - self.filelist.append('pyproject.toml') - - def _add_defaults_python(self): - """getting python files""" - if self.distribution.has_pure_modules(): - build_py = self.get_finalized_command('build_py') - self.filelist.extend(build_py.get_source_files()) - self._add_data_files(self._safe_data_files(build_py)) - - def _safe_data_files(self, build_py): - """ - Since the ``sdist`` class is also used to compute the MANIFEST - (via :obj:`setuptools.command.egg_info.manifest_maker`), - there might be recursion problems when trying to obtain the list of - data_files and ``include_package_data=True`` (which in turn depends on - the files included in the MANIFEST). - - To avoid that, ``manifest_maker`` should be able to overwrite this - method and avoid recursive attempts to build/analyze the MANIFEST. - """ - return build_py.data_files - - def _add_data_files(self, data_files): - """ - Add data files as found in build_py.data_files. - """ - self.filelist.extend( - os.path.join(src_dir, name) - for _, src_dir, _, filenames in data_files - for name in filenames - ) - - def _add_defaults_data_files(self): - try: - super()._add_defaults_data_files() - except TypeError: - log.warn("data_files contains unexpected objects") - - def check_readme(self): - for f in self.READMES: - if os.path.exists(f): - return - else: - self.warn( - "standard file not found: should have one of " + - ', '.join(self.READMES) - ) - - def make_release_tree(self, base_dir, files): - orig.sdist.make_release_tree(self, base_dir, files) - - # Save any egg_info command line options used to create this sdist - dest = os.path.join(base_dir, 'setup.cfg') - if hasattr(os, 'link') and os.path.exists(dest): - # unlink and re-copy, since it might be hard-linked, and - # we don't want to change the source version - os.unlink(dest) - self.copy_file('setup.cfg', dest) - - self.get_finalized_command('egg_info').save_version_info(dest) - - def _manifest_is_not_generated(self): - # check for special comment used in 2.7.1 and higher - if not os.path.isfile(self.manifest): - return False - - with io.open(self.manifest, 'rb') as fp: - first_line = fp.readline() - return (first_line != - '# file GENERATED by distutils, do NOT edit\n'.encode()) - - def read_manifest(self): - """Read the manifest file (named by 'self.manifest') and use it to - fill in 'self.filelist', the list of files to include in the source - distribution. - """ - log.info("reading manifest file '%s'", self.manifest) - manifest = open(self.manifest, 'rb') - for line in manifest: - # The manifest must contain UTF-8. See #303. 
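
A self-contained sketch of the decode-or-skip pattern applied to manifest lines below (the real loop also logs a warning for undecodable lines):

    def iter_manifest_lines(raw_lines):
        """Yield manifest entries, dropping non-UTF-8, blank, and comment lines."""
        for raw in raw_lines:
            try:
                line = raw.decode("UTF-8")
            except UnicodeDecodeError:
                continue  # the real command warns here instead of failing
            line = line.strip()
            if line and not line.startswith("#"):
                yield line

    print(list(iter_manifest_lines([b"setup.py\n", b"# comment\n", b"\xff\n"])))
    # -> ['setup.py']
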
- try: - line = line.decode('UTF-8') - except UnicodeDecodeError: - log.warn("%r not UTF-8 decodable -- skipping" % line) - continue - # ignore comments and blank lines - line = line.strip() - if line.startswith('#') or not line: - continue - self.filelist.append(line) - manifest.close() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/setopt.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/setopt.py deleted file mode 100755 index 6358c04..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/setopt.py +++ /dev/null @@ -1,149 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsOptionError -import distutils -import os -import configparser - -from setuptools import Command - -__all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] - - -def config_file(kind="local"): - """Get the filename of the distutils, local, global, or per-user config - - `kind` must be one of "local", "global", or "user" - """ - if kind == 'local': - return 'setup.cfg' - if kind == 'global': - return os.path.join( - os.path.dirname(distutils.__file__), 'distutils.cfg' - ) - if kind == 'user': - dot = os.name == 'posix' and '.' or '' - return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) - raise ValueError( - "config_file() type must be 'local', 'global', or 'user'", kind - ) - - -def edit_config(filename, settings, dry_run=False): - """Edit a configuration file to include `settings` - - `settings` is a dictionary of dictionaries or ``None`` values, keyed by - command/section name. A ``None`` value means to delete the entire section, - while a dictionary lists settings to be changed or deleted in that section. - A setting of ``None`` means to delete that setting. 
- """ - log.debug("Reading configuration from %s", filename) - opts = configparser.RawConfigParser() - opts.optionxform = lambda x: x - opts.read([filename]) - for section, options in settings.items(): - if options is None: - log.info("Deleting section [%s] from %s", section, filename) - opts.remove_section(section) - else: - if not opts.has_section(section): - log.debug("Adding new section [%s] to %s", section, filename) - opts.add_section(section) - for option, value in options.items(): - if value is None: - log.debug( - "Deleting %s.%s from %s", - section, option, filename - ) - opts.remove_option(section, option) - if not opts.options(section): - log.info("Deleting empty [%s] section from %s", - section, filename) - opts.remove_section(section) - else: - log.debug( - "Setting %s.%s to %r in %s", - section, option, value, filename - ) - opts.set(section, option, value) - - log.info("Writing %s", filename) - if not dry_run: - with open(filename, 'w') as f: - opts.write(f) - - -class option_base(Command): - """Abstract base class for commands that mess with config files""" - - user_options = [ - ('global-config', 'g', - "save options to the site-wide distutils.cfg file"), - ('user-config', 'u', - "save options to the current user's pydistutils.cfg file"), - ('filename=', 'f', - "configuration file to use (default=setup.cfg)"), - ] - - boolean_options = [ - 'global-config', 'user-config', - ] - - def initialize_options(self): - self.global_config = None - self.user_config = None - self.filename = None - - def finalize_options(self): - filenames = [] - if self.global_config: - filenames.append(config_file('global')) - if self.user_config: - filenames.append(config_file('user')) - if self.filename is not None: - filenames.append(self.filename) - if not filenames: - filenames.append(config_file('local')) - if len(filenames) > 1: - raise DistutilsOptionError( - "Must specify only one configuration file option", - filenames - ) - self.filename, = filenames - - -class setopt(option_base): - """Save command-line options to a file""" - - description = "set an option in setup.cfg or another config file" - - user_options = [ - ('command=', 'c', 'command to set an option for'), - ('option=', 'o', 'option to set'), - ('set-value=', 's', 'value of the option'), - ('remove', 'r', 'remove (unset) the value'), - ] + option_base.user_options - - boolean_options = option_base.boolean_options + ['remove'] - - def initialize_options(self): - option_base.initialize_options(self) - self.command = None - self.option = None - self.set_value = None - self.remove = None - - def finalize_options(self): - option_base.finalize_options(self) - if self.command is None or self.option is None: - raise DistutilsOptionError("Must specify --command *and* --option") - if self.set_value is None and not self.remove: - raise DistutilsOptionError("Must specify --set-value or --remove") - - def run(self): - edit_config( - self.filename, { - self.command: {self.option.replace('-', '_'): self.set_value} - }, - self.dry_run - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/test.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/test.py deleted file mode 100755 index 4a389e4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/test.py +++ /dev/null @@ -1,252 +0,0 @@ -import os -import operator -import sys -import contextlib -import itertools -import unittest -from distutils.errors import DistutilsError, 
DistutilsOptionError -from distutils import log -from unittest import TestLoader - -from pkg_resources import ( - resource_listdir, - resource_exists, - normalize_path, - working_set, - evaluate_marker, - add_activation_listener, - require, - EntryPoint, -) -from setuptools import Command -from setuptools.extern.more_itertools import unique_everseen - - -class ScanningLoader(TestLoader): - def __init__(self): - TestLoader.__init__(self) - self._visited = set() - - def loadTestsFromModule(self, module, pattern=None): - """Return a suite of all tests cases contained in the given module - - If the module is a package, load tests from all the modules in it. - If the module has an ``additional_tests`` function, call it and add - the return value to the tests. - """ - if module in self._visited: - return None - self._visited.add(module) - - tests = [] - tests.append(TestLoader.loadTestsFromModule(self, module)) - - if hasattr(module, "additional_tests"): - tests.append(module.additional_tests()) - - if hasattr(module, '__path__'): - for file in resource_listdir(module.__name__, ''): - if file.endswith('.py') and file != '__init__.py': - submodule = module.__name__ + '.' + file[:-3] - else: - if resource_exists(module.__name__, file + '/__init__.py'): - submodule = module.__name__ + '.' + file - else: - continue - tests.append(self.loadTestsFromName(submodule)) - - if len(tests) != 1: - return self.suiteClass(tests) - else: - return tests[0] # don't create a nested suite for only one return - - -# adapted from jaraco.classes.properties:NonDataProperty -class NonDataProperty: - def __init__(self, fget): - self.fget = fget - - def __get__(self, obj, objtype=None): - if obj is None: - return self - return self.fget(obj) - - -class test(Command): - """Command to run unit tests after in-place build""" - - description = "run unit tests after in-place build (deprecated)" - - user_options = [ - ('test-module=', 'm', "Run 'test_suite' in specified module"), - ( - 'test-suite=', - 's', - "Run single test, case or suite (e.g. 'module.test_suite')", - ), - ('test-runner=', 'r', "Test runner to use"), - ] - - def initialize_options(self): - self.test_suite = None - self.test_module = None - self.test_loader = None - self.test_runner = None - - def finalize_options(self): - - if self.test_suite and self.test_module: - msg = "You may specify a module or a suite, but not both" - raise DistutilsOptionError(msg) - - if self.test_suite is None: - if self.test_module is None: - self.test_suite = self.distribution.test_suite - else: - self.test_suite = self.test_module + ".test_suite" - - if self.test_loader is None: - self.test_loader = getattr(self.distribution, 'test_loader', None) - if self.test_loader is None: - self.test_loader = "setuptools.command.test:ScanningLoader" - if self.test_runner is None: - self.test_runner = getattr(self.distribution, 'test_runner', None) - - @NonDataProperty - def test_args(self): - return list(self._test_args()) - - def _test_args(self): - if not self.test_suite and sys.version_info >= (2, 7): - yield 'discover' - if self.verbose: - yield '--verbose' - if self.test_suite: - yield self.test_suite - - def with_project_on_sys_path(self, func): - """ - Backward compatibility for project_on_sys_path context. 
- """ - with self.project_on_sys_path(): - func() - - @contextlib.contextmanager - def project_on_sys_path(self, include_dists=[]): - self.run_command('egg_info') - - # Build extensions in-place - self.reinitialize_command('build_ext', inplace=1) - self.run_command('build_ext') - - ei_cmd = self.get_finalized_command("egg_info") - - old_path = sys.path[:] - old_modules = sys.modules.copy() - - try: - project_path = normalize_path(ei_cmd.egg_base) - sys.path.insert(0, project_path) - working_set.__init__() - add_activation_listener(lambda dist: dist.activate()) - require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version)) - with self.paths_on_pythonpath([project_path]): - yield - finally: - sys.path[:] = old_path - sys.modules.clear() - sys.modules.update(old_modules) - working_set.__init__() - - @staticmethod - @contextlib.contextmanager - def paths_on_pythonpath(paths): - """ - Add the indicated paths to the head of the PYTHONPATH environment - variable so that subprocesses will also see the packages at - these paths. - - Do this in a context that restores the value on exit. - """ - nothing = object() - orig_pythonpath = os.environ.get('PYTHONPATH', nothing) - current_pythonpath = os.environ.get('PYTHONPATH', '') - try: - prefix = os.pathsep.join(unique_everseen(paths)) - to_join = filter(None, [prefix, current_pythonpath]) - new_path = os.pathsep.join(to_join) - if new_path: - os.environ['PYTHONPATH'] = new_path - yield - finally: - if orig_pythonpath is nothing: - os.environ.pop('PYTHONPATH', None) - else: - os.environ['PYTHONPATH'] = orig_pythonpath - - @staticmethod - def install_dists(dist): - """ - Install the requirements indicated by self.distribution and - return an iterable of the dists that were built. - """ - ir_d = dist.fetch_build_eggs(dist.install_requires) - tr_d = dist.fetch_build_eggs(dist.tests_require or []) - er_d = dist.fetch_build_eggs( - v - for k, v in dist.extras_require.items() - if k.startswith(':') and evaluate_marker(k[1:]) - ) - return itertools.chain(ir_d, tr_d, er_d) - - def run(self): - self.announce( - "WARNING: Testing via this command is deprecated and will be " - "removed in a future version. Users looking for a generic test " - "entry point independent of test runner are encouraged to use " - "tox.", - log.WARN, - ) - - installed_dists = self.install_dists(self.distribution) - - cmd = ' '.join(self._argv) - if self.dry_run: - self.announce('skipping "%s" (dry run)' % cmd) - return - - self.announce('running "%s"' % cmd) - - paths = map(operator.attrgetter('location'), installed_dists) - with self.paths_on_pythonpath(paths): - with self.project_on_sys_path(): - self.run_tests() - - def run_tests(self): - test = unittest.main( - None, - None, - self._argv, - testLoader=self._resolve_as_ep(self.test_loader), - testRunner=self._resolve_as_ep(self.test_runner), - exit=False, - ) - if not test.result.wasSuccessful(): - msg = 'Test failed: %s' % test.result - self.announce(msg, log.ERROR) - raise DistutilsError(msg) - - @property - def _argv(self): - return ['unittest'] + self.test_args - - @staticmethod - def _resolve_as_ep(val): - """ - Load the indicated attribute value, called, as a as if it were - specified as an entry point. 
- """ - if val is None: - return - parsed = EntryPoint.parse("x=" + val) - return parsed.resolve()() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload.py deleted file mode 100755 index ec7f81e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload.py +++ /dev/null @@ -1,17 +0,0 @@ -from distutils import log -from distutils.command import upload as orig - -from setuptools.errors import RemovedCommandError - - -class upload(orig.upload): - """Formerly used to upload packages to PyPI.""" - - def run(self): - msg = ( - "The upload command has been removed, use twine to upload " - + "instead (https://pypi.org/p/twine)" - ) - - self.announce("ERROR: " + msg, log.ERROR) - raise RemovedCommandError(msg) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload_docs.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload_docs.py deleted file mode 100755 index 845bff4..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/command/upload_docs.py +++ /dev/null @@ -1,202 +0,0 @@ -# -*- coding: utf-8 -*- -"""upload_docs - -Implements a Distutils 'upload_docs' subcommand (upload documentation to -sites other than PyPi such as devpi). -""" - -from base64 import standard_b64encode -from distutils import log -from distutils.errors import DistutilsOptionError -import os -import socket -import zipfile -import tempfile -import shutil -import itertools -import functools -import http.client -import urllib.parse - -from pkg_resources import iter_entry_points -from .upload import upload - - -def _encode(s): - return s.encode('utf-8', 'surrogateescape') - - -class upload_docs(upload): - # override the default repository as upload_docs isn't - # supported by Warehouse (and won't be). - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/' - - description = 'Upload documentation to sites other than PyPi such as devpi' - - user_options = [ - ('repository=', 'r', - "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY), - ('show-response', None, - 'display full response text from server'), - ('upload-dir=', None, 'directory to upload'), - ] - boolean_options = upload.boolean_options - - def has_sphinx(self): - if self.upload_dir is None: - for ep in iter_entry_points('distutils.commands', 'build_sphinx'): - return True - - sub_commands = [('build_sphinx', has_sphinx)] - - def initialize_options(self): - upload.initialize_options(self) - self.upload_dir = None - self.target_dir = None - - def finalize_options(self): - upload.finalize_options(self) - if self.upload_dir is None: - if self.has_sphinx(): - build_sphinx = self.get_finalized_command('build_sphinx') - self.target_dir = dict(build_sphinx.builder_target_dirs)['html'] - else: - build = self.get_finalized_command('build') - self.target_dir = os.path.join(build.build_base, 'docs') - else: - self.ensure_dirname('upload_dir') - self.target_dir = self.upload_dir - if 'pypi.python.org' in self.repository: - log.warn("Upload_docs command is deprecated for PyPi. 
Use RTD instead.") - self.announce('Using upload directory %s' % self.target_dir) - - def create_zipfile(self, filename): - zip_file = zipfile.ZipFile(filename, "w") - try: - self.mkpath(self.target_dir) # just in case - for root, dirs, files in os.walk(self.target_dir): - if root == self.target_dir and not files: - tmpl = "no files found in upload directory '%s'" - raise DistutilsOptionError(tmpl % self.target_dir) - for name in files: - full = os.path.join(root, name) - relative = root[len(self.target_dir):].lstrip(os.path.sep) - dest = os.path.join(relative, name) - zip_file.write(full, dest) - finally: - zip_file.close() - - def run(self): - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - tmp_dir = tempfile.mkdtemp() - name = self.distribution.metadata.get_name() - zip_file = os.path.join(tmp_dir, "%s.zip" % name) - try: - self.create_zipfile(zip_file) - self.upload_file(zip_file) - finally: - shutil.rmtree(tmp_dir) - - @staticmethod - def _build_part(item, sep_boundary): - key, values = item - title = '\nContent-Disposition: form-data; name="%s"' % key - # handle multiple entries for the same name - if not isinstance(values, list): - values = [values] - for value in values: - if isinstance(value, tuple): - title += '; filename="%s"' % value[0] - value = value[1] - else: - value = _encode(value) - yield sep_boundary - yield _encode(title) - yield b"\n\n" - yield value - if value and value[-1:] == b'\r': - yield b'\n' # write an extra newline (lurve Macs) - - @classmethod - def _build_multipart(cls, data): - """ - Build up the MIME payload for the POST data - """ - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = b'\n--' + boundary.encode('ascii') - end_boundary = sep_boundary + b'--' - end_items = end_boundary, b"\n", - builder = functools.partial( - cls._build_part, - sep_boundary=sep_boundary, - ) - part_groups = map(builder, data.items()) - parts = itertools.chain.from_iterable(part_groups) - body_items = itertools.chain(parts, end_items) - content_type = 'multipart/form-data; boundary=%s' % boundary - return b''.join(body_items), content_type - - def upload_file(self, filename): - with open(filename, 'rb') as f: - content = f.read() - meta = self.distribution.metadata - data = { - ':action': 'doc_upload', - 'name': meta.get_name(), - 'content': (os.path.basename(filename), content), - } - # set up the authentication - credentials = _encode(self.username + ':' + self.password) - credentials = standard_b64encode(credentials).decode('ascii') - auth = "Basic " + credentials - - body, ct = self._build_multipart(data) - - msg = "Submitting documentation to %s" % (self.repository) - self.announce(msg, log.INFO) - - # build the Request - # We can't use urllib2 since we need to send the Basic - # auth right with the first request - schema, netloc, url, params, query, fragments = \ - urllib.parse.urlparse(self.repository) - assert not params and not query and not fragments - if schema == 'http': - conn = http.client.HTTPConnection(netloc) - elif schema == 'https': - conn = http.client.HTTPSConnection(netloc) - else: - raise AssertionError("unsupported schema " + schema) - - data = '' - try: - conn.connect() - conn.putrequest("POST", url) - content_type = ct - conn.putheader('Content-type', content_type) - conn.putheader('Content-length', str(len(body))) - conn.putheader('Authorization', auth) - conn.endheaders() - conn.send(body) - except socket.error as e: - self.announce(str(e), log.ERROR) - return - - r = 
conn.getresponse() - if r.status == 200: - msg = 'Server response (%s): %s' % (r.status, r.reason) - self.announce(msg, log.INFO) - elif r.status == 301: - location = r.getheader('Location') - if location is None: - location = 'https://pythonhosted.org/%s/' % meta.get_name() - msg = 'Upload successful. Visit %s' % location - self.announce(msg, log.INFO) - else: - msg = 'Upload failed (%s): %s' % (r.status, r.reason) - self.announce(msg, log.ERROR) - if self.show_response: - print('-' * 75, r.read(), '-' * 75) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/config.py deleted file mode 100755 index b4e968e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/config.py +++ /dev/null @@ -1,751 +0,0 @@ -import ast -import io -import os -import sys - -import warnings -import functools -import importlib -from collections import defaultdict -from functools import partial -from functools import wraps -from glob import iglob -import contextlib - -from distutils.errors import DistutilsOptionError, DistutilsFileError -from setuptools.extern.packaging.version import Version, InvalidVersion -from setuptools.extern.packaging.specifiers import SpecifierSet - - -class StaticModule: - """ - Attempt to load the module by the name - """ - - def __init__(self, name): - spec = importlib.util.find_spec(name) - with open(spec.origin) as strm: - src = strm.read() - module = ast.parse(src) - vars(self).update(locals()) - del self.self - - def __getattr__(self, attr): - try: - return next( - ast.literal_eval(statement.value) - for statement in self.module.body - if isinstance(statement, ast.Assign) - for target in statement.targets - if isinstance(target, ast.Name) and target.id == attr - ) - except Exception as e: - raise AttributeError( - "{self.name} has no attribute {attr}".format(**locals()) - ) from e - - -@contextlib.contextmanager -def patch_path(path): - """ - Add path to front of sys.path for the duration of the context. - """ - try: - sys.path.insert(0, path) - yield - finally: - sys.path.remove(path) - - -def read_configuration(filepath, find_others=False, ignore_option_errors=False): - """Read given configuration file and returns options from it as a dict. - - :param str|unicode filepath: Path to configuration file - to get options from. - - :param bool find_others: Whether to search for other configuration files - which could be on in various places. - - :param bool ignore_option_errors: Whether to silently ignore - options, values of which could not be resolved (e.g. due to exceptions - in directives such as file:, attr:, etc.). - If False exceptions are propagated as expected. - - :rtype: dict - """ - from setuptools.dist import Distribution, _Distribution - - filepath = os.path.abspath(filepath) - - if not os.path.isfile(filepath): - raise DistutilsFileError('Configuration file %s does not exist.' 
% filepath) - - current_directory = os.getcwd() - os.chdir(os.path.dirname(filepath)) - - try: - dist = Distribution() - - filenames = dist.find_config_files() if find_others else [] - if filepath not in filenames: - filenames.append(filepath) - - _Distribution.parse_config_files(dist, filenames=filenames) - - handlers = parse_configuration( - dist, dist.command_options, ignore_option_errors=ignore_option_errors - ) - - finally: - os.chdir(current_directory) - - return configuration_to_dict(handlers) - - -def _get_option(target_obj, key): - """ - Given a target object and option key, get that option from - the target object, either through a get_{key} method or - from an attribute directly. - """ - getter_name = 'get_{key}'.format(**locals()) - by_attribute = functools.partial(getattr, target_obj, key) - getter = getattr(target_obj, getter_name, by_attribute) - return getter() - - -def configuration_to_dict(handlers): - """Returns configuration data gathered by given handlers as a dict. - - :param list[ConfigHandler] handlers: Handlers list, - usually from parse_configuration() - - :rtype: dict - """ - config_dict = defaultdict(dict) - - for handler in handlers: - for option in handler.set_options: - value = _get_option(handler.target_obj, option) - config_dict[handler.section_prefix][option] = value - - return config_dict - - -def parse_configuration(distribution, command_options, ignore_option_errors=False): - """Performs additional parsing of configuration options - for a distribution. - - Returns a list of used option handlers. - - :param Distribution distribution: - :param dict command_options: - :param bool ignore_option_errors: Whether to silently ignore - options, values of which could not be resolved (e.g. due to exceptions - in directives such as file:, attr:, etc.). - If False exceptions are propagated as expected. - :rtype: list - """ - options = ConfigOptionsHandler(distribution, command_options, ignore_option_errors) - options.parse() - - meta = ConfigMetadataHandler( - distribution.metadata, - command_options, - ignore_option_errors, - distribution.package_dir, - ) - meta.parse() - - return meta, options - - -class ConfigHandler: - """Handles metadata supplied in configuration files.""" - - section_prefix = None - """Prefix for config sections handled by this handler. - Must be provided by class heirs. - - """ - - aliases = {} - """Options aliases. - For compatibility with various packages. E.g.: d2to1 and pbr. - Note: `-` in keys is replaced with `_` by config parser. - - """ - - def __init__(self, target_obj, options, ignore_option_errors=False): - sections = {} - - section_prefix = self.section_prefix - for section_name, section_options in options.items(): - if not section_name.startswith(section_prefix): - continue - - section_name = section_name.replace(section_prefix, '').strip('.') - sections[section_name] = section_options - - self.ignore_option_errors = ignore_option_errors - self.target_obj = target_obj - self.sections = sections - self.set_options = [] - - @property - def parsers(self): - """Metadata item name to parser function mapping.""" - raise NotImplementedError( - '%s must provide .parsers property' % self.__class__.__name__ - ) - - def __setitem__(self, option_name, value): - unknown = tuple() - target_obj = self.target_obj - - # Translate alias into real name. 
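
For instance, with the `aliases` mapping that `ConfigMetadataHandler` defines further down, a `home_page` key from a config file resolves to the real `url` attribute, while unknown keys fall through unchanged:

    aliases = {"home_page": "url", "summary": "description"}

    # dict.get(key, key) returns the alias when present, else the key itself:
    print(aliases.get("home_page", "home_page"))  # url
    print(aliases.get("keywords", "keywords"))    # keywords
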
- option_name = self.aliases.get(option_name, option_name) - - current_value = getattr(target_obj, option_name, unknown) - - if current_value is unknown: - raise KeyError(option_name) - - if current_value: - # Already inhabited. Skipping. - return - - skip_option = False - parser = self.parsers.get(option_name) - if parser: - try: - value = parser(value) - - except Exception: - skip_option = True - if not self.ignore_option_errors: - raise - - if skip_option: - return - - setter = getattr(target_obj, 'set_%s' % option_name, None) - if setter is None: - setattr(target_obj, option_name, value) - else: - setter(value) - - self.set_options.append(option_name) - - @classmethod - def _parse_list(cls, value, separator=','): - """Represents value as a list. - - Value is split either by separator (defaults to comma) or by lines. - - :param value: - :param separator: List items separator character. - :rtype: list - """ - if isinstance(value, list): # _get_parser_compound case - return value - - if '\n' in value: - value = value.splitlines() - else: - value = value.split(separator) - - return [chunk.strip() for chunk in value if chunk.strip()] - - @classmethod - def _parse_list_glob(cls, value, separator=','): - """Equivalent to _parse_list() but expands any glob patterns using glob(). - - However, unlike with glob() calls, the results remain relative paths. - - :param value: - :param separator: List items separator character. - :rtype: list - """ - glob_characters = ('*', '?', '[', ']', '{', '}') - values = cls._parse_list(value, separator=separator) - expanded_values = [] - for value in values: - - # Has globby characters? - if any(char in value for char in glob_characters): - # then expand the glob pattern while keeping paths *relative*: - expanded_values.extend(sorted( - os.path.relpath(path, os.getcwd()) - for path in iglob(os.path.abspath(value)))) - - else: - # take the value as-is: - expanded_values.append(value) - - return expanded_values - - @classmethod - def _parse_dict(cls, value): - """Represents value as a dict. - - :param value: - :rtype: dict - """ - separator = '=' - result = {} - for line in cls._parse_list(value): - key, sep, val = line.partition(separator) - if sep != separator: - raise DistutilsOptionError( - 'Unable to parse option value to dict: %s' % value - ) - result[key.strip()] = val.strip() - - return result - - @classmethod - def _parse_bool(cls, value): - """Represents value as boolean. - - :param value: - :rtype: bool - """ - value = value.lower() - return value in ('1', 'true', 'yes') - - @classmethod - def _exclude_files_parser(cls, key): - """Returns a parser function to make sure field inputs - are not files. - - Parses a value after getting the key so error messages are - more informative. - - :param key: - :rtype: callable - """ - - def parser(value): - exclude_directive = 'file:' - if value.startswith(exclude_directive): - raise ValueError( - 'Only strings are accepted for the {0} field, ' - 'files are not accepted'.format(key) - ) - return value - - return parser - - @classmethod - def _parse_file(cls, value): - """Represents value as a string, allowing including text - from nearest files using `file:` directive. - - Directive is sandboxed and won't reach anything outside - directory with setup.py. 
- - Examples: - file: README.rst, CHANGELOG.md, src/file.txt - - :param str value: - :rtype: str - """ - include_directive = 'file:' - - if not isinstance(value, str): - return value - - if not value.startswith(include_directive): - return value - - spec = value[len(include_directive) :] - filepaths = (os.path.abspath(path.strip()) for path in spec.split(',')) - return '\n'.join( - cls._read_file(path) - for path in filepaths - if (cls._assert_local(path) or True) and os.path.isfile(path) - ) - - @staticmethod - def _assert_local(filepath): - if not filepath.startswith(os.getcwd()): - raise DistutilsOptionError('`file:` directive can not access %s' % filepath) - - @staticmethod - def _read_file(filepath): - with io.open(filepath, encoding='utf-8') as f: - return f.read() - - @classmethod - def _parse_attr(cls, value, package_dir=None): - """Represents value as a module attribute. - - Examples: - attr: package.attr - attr: package.module.attr - - :param str value: - :rtype: str - """ - attr_directive = 'attr:' - if not value.startswith(attr_directive): - return value - - attrs_path = value.replace(attr_directive, '').strip().split('.') - attr_name = attrs_path.pop() - - module_name = '.'.join(attrs_path) - module_name = module_name or '__init__' - - parent_path = os.getcwd() - if package_dir: - if attrs_path[0] in package_dir: - # A custom path was specified for the module we want to import - custom_path = package_dir[attrs_path[0]] - parts = custom_path.rsplit('/', 1) - if len(parts) > 1: - parent_path = os.path.join(os.getcwd(), parts[0]) - module_name = parts[1] - else: - module_name = custom_path - elif '' in package_dir: - # A custom parent directory was specified for all root modules - parent_path = os.path.join(os.getcwd(), package_dir['']) - - with patch_path(parent_path): - try: - # attempt to load value statically - return getattr(StaticModule(module_name), attr_name) - except Exception: - # fallback to simple import - module = importlib.import_module(module_name) - - return getattr(module, attr_name) - - @classmethod - def _get_parser_compound(cls, *parse_methods): - """Returns parser function to represents value as a list. - - Parses a value applying given methods one after another. - - :param parse_methods: - :rtype: callable - """ - - def parse(value): - parsed = value - - for method in parse_methods: - parsed = method(parsed) - - return parsed - - return parse - - @classmethod - def _parse_section_to_dict(cls, section_options, values_parser=None): - """Parses section options into a dictionary. - - Optionally applies a given parser to values. - - :param dict section_options: - :param callable values_parser: - :rtype: dict - """ - value = {} - values_parser = values_parser or (lambda val: val) - for key, (_, val) in section_options.items(): - value[key] = values_parser(val) - return value - - def parse_section(self, section_options): - """Parses configuration file section. - - :param dict section_options: - """ - for (name, (_, value)) in section_options.items(): - try: - self[name] = value - - except KeyError: - pass # Keep silent for a new option may appear anytime. - - def parse(self): - """Parses configuration file items from one - or more related sections. - - """ - for section_name, section_options in self.sections.items(): - - method_postfix = '' - if section_name: # [section.option] variant - method_postfix = '_%s' % section_name - - section_parser_method = getattr( - self, - # Dots in section names are translated into dunderscores. 
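
The name mangling in the `getattr` lookup here maps an INI section name onto a handler method; a small sketch of the transformation:

    def section_method_name(section_name):
        method_postfix = "_%s" % section_name if section_name else ""
        # Dots become double underscores so the result is a valid identifier:
        return ("parse_section%s" % method_postfix).replace(".", "__")

    print(section_method_name(""))               # parse_section
    print(section_method_name("entry_points"))   # parse_section_entry_points
    print(section_method_name("packages.find"))  # parse_section_packages__find
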
- ('parse_section%s' % method_postfix).replace('.', '__'), - None, - ) - - if section_parser_method is None: - raise DistutilsOptionError( - 'Unsupported distribution option section: [%s.%s]' - % (self.section_prefix, section_name) - ) - - section_parser_method(section_options) - - def _deprecated_config_handler(self, func, msg, warning_class): - """this function will wrap around parameters that are deprecated - - :param msg: deprecation message - :param warning_class: class of warning exception to be raised - :param func: function to be wrapped around - """ - - @wraps(func) - def config_handler(*args, **kwargs): - warnings.warn(msg, warning_class) - return func(*args, **kwargs) - - return config_handler - - -class ConfigMetadataHandler(ConfigHandler): - - section_prefix = 'metadata' - - aliases = { - 'home_page': 'url', - 'summary': 'description', - 'classifier': 'classifiers', - 'platform': 'platforms', - } - - strict_mode = False - """We need to keep it loose, to be partially compatible with - `pbr` and `d2to1` packages which also uses `metadata` section. - - """ - - def __init__( - self, target_obj, options, ignore_option_errors=False, package_dir=None - ): - super(ConfigMetadataHandler, self).__init__( - target_obj, options, ignore_option_errors - ) - self.package_dir = package_dir - - @property - def parsers(self): - """Metadata item name to parser function mapping.""" - parse_list = self._parse_list - parse_file = self._parse_file - parse_dict = self._parse_dict - exclude_files_parser = self._exclude_files_parser - - return { - 'platforms': parse_list, - 'keywords': parse_list, - 'provides': parse_list, - 'requires': self._deprecated_config_handler( - parse_list, - "The requires parameter is deprecated, please use " - "install_requires for runtime dependencies.", - DeprecationWarning, - ), - 'obsoletes': parse_list, - 'classifiers': self._get_parser_compound(parse_file, parse_list), - 'license': exclude_files_parser('license'), - 'license_file': self._deprecated_config_handler( - exclude_files_parser('license_file'), - "The license_file parameter is deprecated, " - "use license_files instead.", - DeprecationWarning, - ), - 'license_files': parse_list, - 'description': parse_file, - 'long_description': parse_file, - 'version': self._parse_version, - 'project_urls': parse_dict, - } - - def _parse_version(self, value): - """Parses `version` option value. 
- - :param value: - :rtype: str - - """ - version = self._parse_file(value) - - if version != value: - version = version.strip() - # Be strict about versions loaded from file because it's easy to - # accidentally include newlines and other unintended content - try: - Version(version) - except InvalidVersion: - tmpl = ( - 'Version loaded from {value} does not ' - 'comply with PEP 440: {version}' - ) - raise DistutilsOptionError(tmpl.format(**locals())) - - return version - - version = self._parse_attr(value, self.package_dir) - - if callable(version): - version = version() - - if not isinstance(version, str): - if hasattr(version, '__iter__'): - version = '.'.join(map(str, version)) - else: - version = '%s' % version - - return version - - -class ConfigOptionsHandler(ConfigHandler): - - section_prefix = 'options' - - @property - def parsers(self): - """Metadata item name to parser function mapping.""" - parse_list = self._parse_list - parse_list_semicolon = partial(self._parse_list, separator=';') - parse_bool = self._parse_bool - parse_dict = self._parse_dict - parse_cmdclass = self._parse_cmdclass - - return { - 'zip_safe': parse_bool, - 'include_package_data': parse_bool, - 'package_dir': parse_dict, - 'scripts': parse_list, - 'eager_resources': parse_list, - 'dependency_links': parse_list, - 'namespace_packages': parse_list, - 'install_requires': parse_list_semicolon, - 'setup_requires': parse_list_semicolon, - 'tests_require': parse_list_semicolon, - 'packages': self._parse_packages, - 'entry_points': self._parse_file, - 'py_modules': parse_list, - 'python_requires': SpecifierSet, - 'cmdclass': parse_cmdclass, - } - - def _parse_cmdclass(self, value): - def resolve_class(qualified_class_name): - idx = qualified_class_name.rfind('.') - class_name = qualified_class_name[idx + 1 :] - pkg_name = qualified_class_name[:idx] - - module = __import__(pkg_name) - - return getattr(module, class_name) - - return {k: resolve_class(v) for k, v in self._parse_dict(value).items()} - - def _parse_packages(self, value): - """Parses `packages` option value. - - :param value: - :rtype: list - """ - find_directives = ['find:', 'find_namespace:'] - trimmed_value = value.strip() - - if trimmed_value not in find_directives: - return self._parse_list(value) - - findns = trimmed_value == find_directives[1] - - # Read function arguments from a dedicated section. - find_kwargs = self.parse_section_packages__find( - self.sections.get('packages.find', {}) - ) - - if findns: - from setuptools import find_namespace_packages as find_packages - else: - from setuptools import find_packages - - return find_packages(**find_kwargs) - - def parse_section_packages__find(self, section_options): - """Parses `packages.find` configuration file section. - - To be used in conjunction with _parse_packages(). - - :param dict section_options: - """ - section_data = self._parse_section_to_dict(section_options, self._parse_list) - - valid_keys = ['where', 'include', 'exclude'] - - find_kwargs = dict( - [(k, v) for k, v in section_data.items() if k in valid_keys and v] - ) - - where = find_kwargs.get('where') - if where is not None: - find_kwargs['where'] = where[0] # cast list to single val - - return find_kwargs - - def parse_section_entry_points(self, section_options): - """Parses `entry_points` configuration file section. 
- - :param dict section_options: - """ - parsed = self._parse_section_to_dict(section_options, self._parse_list) - self['entry_points'] = parsed - - def _parse_package_data(self, section_options): - parsed = self._parse_section_to_dict(section_options, self._parse_list) - - root = parsed.get('*') - if root: - parsed[''] = root - del parsed['*'] - - return parsed - - def parse_section_package_data(self, section_options): - """Parses `package_data` configuration file section. - - :param dict section_options: - """ - self['package_data'] = self._parse_package_data(section_options) - - def parse_section_exclude_package_data(self, section_options): - """Parses `exclude_package_data` configuration file section. - - :param dict section_options: - """ - self['exclude_package_data'] = self._parse_package_data(section_options) - - def parse_section_extras_require(self, section_options): - """Parses `extras_require` configuration file section. - - :param dict section_options: - """ - parse_list = partial(self._parse_list, separator=';') - self['extras_require'] = self._parse_section_to_dict( - section_options, parse_list - ) - - def parse_section_data_files(self, section_options): - """Parses `data_files` configuration file section. - - :param dict section_options: - """ - parsed = self._parse_section_to_dict(section_options, self._parse_list_glob) - self['data_files'] = [(k, v) for k, v in parsed.items()] diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dep_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dep_util.py deleted file mode 100755 index 521eb71..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dep_util.py +++ /dev/null @@ -1,25 +0,0 @@ -from distutils.dep_util import newer_group - - -# yes, this was almost entirely copy-pasted from -# 'newer_pairwise()'; it is just another convenience -# function. -def newer_pairwise_group(sources_groups, targets): - """Walk both arguments in parallel, testing if each source group is newer - than its corresponding target. Returns a pair of lists (sources_groups, - targets) where each source group is newer than its target, according to - the semantics of 'newer_group()'. - """ - if len(sources_groups) != len(targets): - raise ValueError( - "'sources_groups' and 'targets' must be the same length") - - # build a pair of lists (sources_groups, targets) where source is newer - n_sources = [] - n_targets = [] - for i in range(len(sources_groups)): - if newer_group(sources_groups[i], targets[i]): - n_sources.append(sources_groups[i]) - n_targets.append(targets[i]) - - return n_sources, n_targets diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/depends.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/depends.py deleted file mode 100755 index adffd12..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/depends.py +++ /dev/null @@ -1,176 +0,0 @@ -import sys -import marshal -import contextlib -import dis - -from setuptools.extern.packaging import version - -from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE -from .
import _imp - - -__all__ = [ - 'Require', 'find_module', 'get_module_constant', 'extract_constant' -] - - -class Require: - """A prerequisite to building or installing a distribution""" - - def __init__( - self, name, requested_version, module, homepage='', - attribute=None, format=None): - - if format is None and requested_version is not None: - format = version.Version - - if format is not None: - requested_version = format(requested_version) - if attribute is None: - attribute = '__version__' - - self.__dict__.update(locals()) - del self.self - - def full_name(self): - """Return full package/distribution name, w/version""" - if self.requested_version is not None: - return '%s-%s' % (self.name, self.requested_version) - return self.name - - def version_ok(self, version): - """Is 'version' sufficiently up-to-date?""" - return self.attribute is None or self.format is None or \ - str(version) != "unknown" and self.format(version) >= self.requested_version - - def get_version(self, paths=None, default="unknown"): - """Get version number of installed module, 'None', or 'default' - - Search 'paths' for module. If not found, return 'None'. If found, - return the extracted version attribute, or 'default' if no version - attribute was specified, or the value cannot be determined without - importing the module. The version is formatted according to the - requirement's version format (if any), unless it is 'None' or the - supplied 'default'. - """ - - if self.attribute is None: - try: - f, p, i = find_module(self.module, paths) - if f: - f.close() - return default - except ImportError: - return None - - v = get_module_constant(self.module, self.attribute, default, paths) - - if v is not None and v is not default and self.format is not None: - return self.format(v) - - return v - - def is_present(self, paths=None): - """Return true if dependency is present on 'paths'""" - return self.get_version(paths) is not None - - def is_current(self, paths=None): - """Return true if dependency is present and up-to-date on 'paths'""" - version = self.get_version(paths) - if version is None: - return False - return self.version_ok(str(version)) - - -def maybe_close(f): - @contextlib.contextmanager - def empty(): - yield - return - if not f: - return empty() - - return contextlib.closing(f) - - -def get_module_constant(module, symbol, default=-1, paths=None): - """Find 'module' by searching 'paths', and extract 'symbol' - - Return 'None' if 'module' does not exist on 'paths', or it does not define - 'symbol'. If the module defines 'symbol' as a constant, return the - constant. Otherwise, return 'default'.""" - - try: - f, path, (suffix, mode, kind) = info = find_module(module, paths) - except ImportError: - # Module doesn't exist - return None - - with maybe_close(f): - if kind == PY_COMPILED: - f.read(8) # skip magic & date - code = marshal.load(f) - elif kind == PY_FROZEN: - code = _imp.get_frozen_object(module, paths) - elif kind == PY_SOURCE: - code = compile(f.read(), path, 'exec') - else: - # Not something we can parse; we'll have to import it. :( - imported = _imp.get_module(module, paths, info) - return getattr(imported, symbol, None) - - return extract_constant(code, symbol, default) - - -def extract_constant(code, symbol, default=-1): - """Extract the constant value of 'symbol' from 'code' - - If the name 'symbol' is bound to a constant value by the Python code - object 'code', return that value. If 'symbol' is bound to an expression, - return 'default'. Otherwise, return 'None'. 
- - Return value is based on the first assignment to 'symbol'. 'symbol' must - be a global, or at least a non-"fast" local in the code block. That is, - only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' - must be present in 'code.co_names'. - """ - if symbol not in code.co_names: - # name's not there, can't possibly be an assignment - return None - - name_idx = list(code.co_names).index(symbol) - - STORE_NAME = 90 - STORE_GLOBAL = 97 - LOAD_CONST = 100 - - const = default - - for byte_code in dis.Bytecode(code): - op = byte_code.opcode - arg = byte_code.arg - - if op == LOAD_CONST: - const = code.co_consts[arg] - elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): - return const - else: - const = default - - -def _update_globals(): - """ - Patch the globals to remove the objects not available on some platforms. - - XXX it'd be better to test assertions about bytecode instead. - """ - - if not sys.platform.startswith('java') and sys.platform != 'cli': - return - incompatible = 'extract_constant', 'get_module_constant' - for name in incompatible: - del globals()[name] - __all__.remove(name) - - -_update_globals() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dist.py deleted file mode 100755 index f4a56b0..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/dist.py +++ /dev/null @@ -1,1164 +0,0 @@ -# -*- coding: utf-8 -*- -__all__ = ['Distribution'] - -import io -import sys -import re -import os -import warnings -import numbers -import distutils.log -import distutils.core -import distutils.cmd -import distutils.dist -import distutils.command -from distutils.util import strtobool -from distutils.debug import DEBUG -from distutils.fancy_getopt import translate_longopt -from glob import iglob -import itertools -import textwrap -from typing import List, Optional, TYPE_CHECKING - -from collections import defaultdict -from email import message_from_file - -from distutils.errors import DistutilsOptionError, DistutilsSetupError -from distutils.util import rfc822_escape - -from setuptools.extern import packaging -from setuptools.extern import ordered_set -from setuptools.extern.more_itertools import unique_everseen - -from . 
import SetuptoolsDeprecationWarning - -import setuptools -import setuptools.command -from setuptools import windows_support -from setuptools.monkey import get_unpatched -from setuptools.config import parse_configuration -import pkg_resources -from setuptools.extern.packaging import version - -if TYPE_CHECKING: - from email.message import Message - -__import__('setuptools.extern.packaging.specifiers') -__import__('setuptools.extern.packaging.version') - - -def _get_unpatched(cls): - warnings.warn("Do not call this function", DistDeprecationWarning) - return get_unpatched(cls) - - -def get_metadata_version(self): - mv = getattr(self, 'metadata_version', None) - if mv is None: - mv = version.Version('2.1') - self.metadata_version = mv - return mv - - -def rfc822_unescape(content: str) -> str: - """Reverse RFC-822 escaping by removing leading whitespaces from content.""" - lines = content.splitlines() - if len(lines) == 1: - return lines[0].lstrip() - return '\n'.join((lines[0].lstrip(), textwrap.dedent('\n'.join(lines[1:])))) - - -def _read_field_from_msg(msg: "Message", field: str) -> Optional[str]: - """Read Message header field.""" - value = msg[field] - if value == 'UNKNOWN': - return None - return value - - -def _read_field_unescaped_from_msg(msg: "Message", field: str) -> Optional[str]: - """Read Message header field and apply rfc822_unescape.""" - value = _read_field_from_msg(msg, field) - if value is None: - return value - return rfc822_unescape(value) - - -def _read_list_from_msg(msg: "Message", field: str) -> Optional[List[str]]: - """Read Message header field and return all results as list.""" - values = msg.get_all(field, None) - if values == []: - return None - return values - - -def _read_payload_from_msg(msg: "Message") -> Optional[str]: - value = msg.get_payload().strip() - if value == 'UNKNOWN': - return None - return value - - -def read_pkg_file(self, file): - """Reads the metadata values from a file object.""" - msg = message_from_file(file) - - self.metadata_version = version.Version(msg['metadata-version']) - self.name = _read_field_from_msg(msg, 'name') - self.version = _read_field_from_msg(msg, 'version') - self.description = _read_field_from_msg(msg, 'summary') - # we are filling author only. 
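# --- Editorial aside (not part of the original patch): the `_read_*_from_msg`
# helpers above normalize PKG-INFO headers, mapping the legacy 'UNKNOWN'
# sentinel and absent fields to None. A minimal sketch, assuming a
# hypothetical metadata document (uncomment to run):
#
#     from email import message_from_string
#     msg = message_from_string(
#         "Metadata-Version: 2.1\n"
#         "Name: demo\n"
#         "Summary: UNKNOWN\n"
#     )
#     msg["name"]       # -> 'demo'
#     msg["license"]    # -> None (header absent)
#     # _read_field_from_msg(msg, 'summary') -> None ('UNKNOWN' sentinel)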
- self.author = _read_field_from_msg(msg, 'author') - self.maintainer = None - self.author_email = _read_field_from_msg(msg, 'author-email') - self.maintainer_email = None - self.url = _read_field_from_msg(msg, 'home-page') - self.download_url = _read_field_from_msg(msg, 'download-url') - self.license = _read_field_unescaped_from_msg(msg, 'license') - - self.long_description = _read_field_unescaped_from_msg(msg, 'description') - if ( - self.long_description is None and - self.metadata_version >= version.Version('2.1') - ): - self.long_description = _read_payload_from_msg(msg) - self.description = _read_field_from_msg(msg, 'summary') - - if 'keywords' in msg: - self.keywords = _read_field_from_msg(msg, 'keywords').split(',') - - self.platforms = _read_list_from_msg(msg, 'platform') - self.classifiers = _read_list_from_msg(msg, 'classifier') - - # PEP 314 - these fields only exist in 1.1 - if self.metadata_version == version.Version('1.1'): - self.requires = _read_list_from_msg(msg, 'requires') - self.provides = _read_list_from_msg(msg, 'provides') - self.obsoletes = _read_list_from_msg(msg, 'obsoletes') - else: - self.requires = None - self.provides = None - self.obsoletes = None - - self.license_files = _read_list_from_msg(msg, 'license-file') - - -def single_line(val): - """ - Quick and dirty validation for Summary pypa/setuptools#1390. - """ - if '\n' in val: - # TODO: Replace with `raise ValueError("newlines not allowed")` - # after reviewing #2893. - warnings.warn("newlines not allowed and will break in the future") - val = val.strip().split('\n')[0] - return val - - -# Based on Python 3.5 version -def write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME - """Write the PKG-INFO format data to a file object.""" - version = self.get_metadata_version() - - def write_field(key, value): - file.write("%s: %s\n" % (key, value)) - - write_field('Metadata-Version', str(version)) - write_field('Name', self.get_name()) - write_field('Version', self.get_version()) - write_field('Summary', single_line(self.get_description())) - - optional_fields = ( - ('Home-page', 'url'), - ('Download-URL', 'download_url'), - ('Author', 'author'), - ('Author-email', 'author_email'), - ('Maintainer', 'maintainer'), - ('Maintainer-email', 'maintainer_email'), - ) - - for field, attr in optional_fields: - attr_val = getattr(self, attr, None) - if attr_val is not None: - write_field(field, attr_val) - - license = rfc822_escape(self.get_license()) - write_field('License', license) - for project_url in self.project_urls.items(): - write_field('Project-URL', '%s, %s' % project_url) - - keywords = ','.join(self.get_keywords()) - if keywords: - write_field('Keywords', keywords) - - for platform in self.get_platforms(): - write_field('Platform', platform) - - self._write_list(file, 'Classifier', self.get_classifiers()) - - # PEP 314 - self._write_list(file, 'Requires', self.get_requires()) - self._write_list(file, 'Provides', self.get_provides()) - self._write_list(file, 'Obsoletes', self.get_obsoletes()) - - # Setuptools specific for PEP 345 - if hasattr(self, 'python_requires'): - write_field('Requires-Python', self.python_requires) - - # PEP 566 - if self.long_description_content_type: - write_field('Description-Content-Type', self.long_description_content_type) - if self.provides_extras: - for extra in self.provides_extras: - write_field('Provides-Extra', extra) - - self._write_list(file, 'License-File', self.license_files or []) - - file.write("\n%s\n\n" % self.get_long_description()) - - -sequence = 
tuple, list - - -def check_importable(dist, attr, value): - try: - ep = pkg_resources.EntryPoint.parse('x=' + value) - assert not ep.extras - except (TypeError, ValueError, AttributeError, AssertionError) as e: - raise DistutilsSetupError( - "%r must be importable 'module:attrs' string (got %r)" % (attr, value) - ) from e - - -def assert_string_list(dist, attr, value): - """Verify that value is a string list""" - try: - # verify that value is a list or tuple to exclude unordered - # or single-use iterables - assert isinstance(value, (list, tuple)) - # verify that elements of value are strings - assert ''.join(value) != value - except (TypeError, ValueError, AttributeError, AssertionError) as e: - raise DistutilsSetupError( - "%r must be a list of strings (got %r)" % (attr, value) - ) from e - - -def check_nsp(dist, attr, value): - """Verify that namespace packages are valid""" - ns_packages = value - assert_string_list(dist, attr, ns_packages) - for nsp in ns_packages: - if not dist.has_contents_for(nsp): - raise DistutilsSetupError( - "Distribution contains no modules or packages for " - + "namespace package %r" % nsp - ) - parent, sep, child = nsp.rpartition('.') - if parent and parent not in ns_packages: - distutils.log.warn( - "WARNING: %r is declared as a package namespace, but %r" - " is not: please correct this in setup.py", - nsp, - parent, - ) - - -def check_extras(dist, attr, value): - """Verify that extras_require mapping is valid""" - try: - list(itertools.starmap(_check_extra, value.items())) - except (TypeError, ValueError, AttributeError) as e: - raise DistutilsSetupError( - "'extras_require' must be a dictionary whose values are " - "strings or lists of strings containing valid project/version " - "requirement specifiers." - ) from e - - -def _check_extra(extra, reqs): - name, sep, marker = extra.partition(':') - if marker and pkg_resources.invalid_marker(marker): - raise DistutilsSetupError("Invalid environment marker: " + marker) - list(pkg_resources.parse_requirements(reqs)) - - -def assert_bool(dist, attr, value): - """Verify that value is True, False, 0, or 1""" - if bool(value) != value: - tmpl = "{attr!r} must be a boolean value (got {value!r})" - raise DistutilsSetupError(tmpl.format(attr=attr, value=value)) - - -def invalid_unless_false(dist, attr, value): - if not value: - warnings.warn(f"{attr} is ignored.", DistDeprecationWarning) - return - raise DistutilsSetupError(f"{attr} is invalid.") - - -def check_requirements(dist, attr, value): - """Verify that install_requires is a valid requirements list""" - try: - list(pkg_resources.parse_requirements(value)) - if isinstance(value, (dict, set)): - raise TypeError("Unordered types are not allowed") - except (TypeError, ValueError) as error: - tmpl = ( - "{attr!r} must be a string or list of strings " - "containing valid project/version requirement specifiers; {error}" - ) - raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error - - -def check_specifier(dist, attr, value): - """Verify that value is a valid version specifier""" - try: - packaging.specifiers.SpecifierSet(value) - except (packaging.specifiers.InvalidSpecifier, AttributeError) as error: - tmpl = ( - "{attr!r} must be a string " "containing valid version specifiers; {error}" - ) - raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error - - -def check_entry_points(dist, attr, value): - """Verify that entry_points map is parseable""" - try: - pkg_resources.EntryPoint.parse_map(value) - except ValueError as e: - raise 
DistutilsSetupError(e) from e - - -def check_test_suite(dist, attr, value): - if not isinstance(value, str): - raise DistutilsSetupError("test_suite must be a string") - - -def check_package_data(dist, attr, value): - """Verify that value is a dictionary of package names to glob lists""" - if not isinstance(value, dict): - raise DistutilsSetupError( - "{!r} must be a dictionary mapping package names to lists of " - "string wildcard patterns".format(attr) - ) - for k, v in value.items(): - if not isinstance(k, str): - raise DistutilsSetupError( - "keys of {!r} dict must be strings (got {!r})".format(attr, k) - ) - assert_string_list(dist, 'values of {!r} dict'.format(attr), v) - - -def check_packages(dist, attr, value): - for pkgname in value: - if not re.match(r'\w+(\.\w+)*', pkgname): - distutils.log.warn( - "WARNING: %r not a valid package name; please use only " - ".-separated package names in setup.py", - pkgname, - ) - - -_Distribution = get_unpatched(distutils.core.Distribution) - - -class Distribution(_Distribution): - """Distribution with support for tests and package data - - This is an enhanced version of 'distutils.dist.Distribution' that - effectively adds the following new optional keyword arguments to 'setup()': - - 'install_requires' -- a string or sequence of strings specifying project - versions that the distribution requires when installed, in the format - used by 'pkg_resources.require()'. They will be installed - automatically when the package is installed. If you wish to use - packages that are not available in PyPI, or want to give your users an - alternate download location, you can add a 'find_links' option to the - '[easy_install]' section of your project's 'setup.cfg' file, and then - setuptools will scan the listed web pages for links that satisfy the - requirements. - - 'extras_require' -- a dictionary mapping names of optional "extras" to the - additional requirement(s) that using those extras incurs. For example, - this:: - - extras_require = dict(reST = ["docutils>=0.3", "reSTedit"]) - - indicates that the distribution can optionally provide an extra - capability called "reST", but it can only be used if docutils and - reSTedit are installed. If the user installs your package using - EasyInstall and requests one of your extras, the corresponding - additional requirements will be installed if needed. - - 'test_suite' -- the name of a test suite to run for the 'test' command. - If the user runs 'python setup.py test', the package will be installed, - and the named test suite will be run. The format is the same as - would be used on a 'unittest.py' command line. That is, it is the - dotted name of an object to import and call to generate a test suite. - - 'package_data' -- a dictionary mapping package names to lists of filenames - or globs to use to find data files contained in the named packages. - If the dictionary has filenames or globs listed under '""' (the empty - string), those names will be searched for in every package, in addition - to any names for the specific package. Data files found using these - names/globs will be installed along with the package, in the same - location as the package. Note that globs are allowed to reference - the contents of non-package subdirectories, as long as you use '/' as - a path separator. (Globs are automatically converted to - platform-specific paths at runtime.) - - In addition to these new keywords, this class also has several new methods - for manipulating the distribution's contents. 
For example, the 'include()' - and 'exclude()' methods can be thought of as in-place add and subtract - commands that add or remove packages, modules, extensions, and so on from - the distribution. - """ - - _DISTUTILS_UNSUPPORTED_METADATA = { - 'long_description_content_type': lambda: None, - 'project_urls': dict, - 'provides_extras': ordered_set.OrderedSet, - 'license_file': lambda: None, - 'license_files': lambda: None, - } - - _patched_dist = None - - def patch_missing_pkg_info(self, attrs): - # Fake up a replacement for the data that would normally come from - # PKG-INFO, but which might not yet be built if this is a fresh - # checkout. - # - if not attrs or 'name' not in attrs or 'version' not in attrs: - return - key = pkg_resources.safe_name(str(attrs['name'])).lower() - dist = pkg_resources.working_set.by_key.get(key) - if dist is not None and not dist.has_metadata('PKG-INFO'): - dist._version = pkg_resources.safe_version(str(attrs['version'])) - self._patched_dist = dist - - def __init__(self, attrs=None): - have_package_data = hasattr(self, "package_data") - if not have_package_data: - self.package_data = {} - attrs = attrs or {} - self.dist_files = [] - # Filter-out setuptools' specific options. - self.src_root = attrs.pop("src_root", None) - self.patch_missing_pkg_info(attrs) - self.dependency_links = attrs.pop('dependency_links', []) - self.setup_requires = attrs.pop('setup_requires', []) - for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): - vars(self).setdefault(ep.name, None) - _Distribution.__init__( - self, - { - k: v - for k, v in attrs.items() - if k not in self._DISTUTILS_UNSUPPORTED_METADATA - }, - ) - - self._set_metadata_defaults(attrs) - - self.metadata.version = self._normalize_version( - self._validate_version(self.metadata.version) - ) - self._finalize_requires() - - def _validate_metadata(self): - required = {"name"} - provided = { - key - for key in vars(self.metadata) - if getattr(self.metadata, key, None) is not None - } - missing = required - provided - - if missing: - msg = f"Required package metadata is missing: {missing}" - raise DistutilsSetupError(msg) - - def _set_metadata_defaults(self, attrs): - """ - Fill-in missing metadata fields not supported by distutils. - Some fields may have been set by other tools (e.g. pbr). - Those fields (vars(self.metadata)) take precedence to - supplied attrs. - """ - for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items(): - vars(self.metadata).setdefault(option, attrs.get(option, default())) - - @staticmethod - def _normalize_version(version): - if isinstance(version, setuptools.sic) or version is None: - return version - - normalized = str(packaging.version.Version(version)) - if version != normalized: - tmpl = "Normalizing '{version}' to '{normalized}'" - warnings.warn(tmpl.format(**locals())) - return normalized - return version - - @staticmethod - def _validate_version(version): - if isinstance(version, numbers.Number): - # Some people apparently take "version number" too literally :) - version = str(version) - - if version is not None: - try: - packaging.version.Version(version) - except (packaging.version.InvalidVersion, TypeError): - warnings.warn( - "The version specified (%r) is an invalid version, this " - "may not work as expected with newer versions of " - "setuptools, pip, and PyPI. Please see PEP 440 for more " - "details." 
- % version - ) - return setuptools.sic(version) - return version - - def _finalize_requires(self): - """ - Set `metadata.python_requires` and fix environment markers - in `install_requires` and `extras_require`. - """ - if getattr(self, 'python_requires', None): - self.metadata.python_requires = self.python_requires - - if getattr(self, 'extras_require', None): - for extra in self.extras_require.keys(): - # Since this gets called multiple times at points where the - # keys have become 'converted' extras, ensure that we are only - # truly adding extras we haven't seen before here. - extra = extra.split(':')[0] - if extra: - self.metadata.provides_extras.add(extra) - - self._convert_extras_requirements() - self._move_install_requirements_markers() - - def _convert_extras_requirements(self): - """ - Convert requirements in `extras_require` of the form - `"extra": ["barbazquux; {marker}"]` to - `"extra:{marker}": ["barbazquux"]`. - """ - spec_ext_reqs = getattr(self, 'extras_require', None) or {} - self._tmp_extras_require = defaultdict(list) - for section, v in spec_ext_reqs.items(): - # Do not strip empty sections. - self._tmp_extras_require[section] - for r in pkg_resources.parse_requirements(v): - suffix = self._suffix_for(r) - self._tmp_extras_require[section + suffix].append(r) - - @staticmethod - def _suffix_for(req): - """ - For a requirement, return the 'extras_require' suffix for - that requirement. - """ - return ':' + str(req.marker) if req.marker else '' - - def _move_install_requirements_markers(self): - """ - Move requirements in `install_requires` that are using environment - markers to `extras_require`. - """ - - # divide the install_requires into two sets, simple ones still - # handled by install_requires and more complex ones handled - # by extras_require. - - def is_simple_req(req): - return not req.marker - - spec_inst_reqs = getattr(self, 'install_requires', None) or () - inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs)) - simple_reqs = filter(is_simple_req, inst_reqs) - complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs) - self.install_requires = list(map(str, simple_reqs)) - - for r in complex_reqs: - self._tmp_extras_require[':' + str(r.marker)].append(r) - self.extras_require = dict( - (k, [str(r) for r in map(self._clean_req, v)]) - for k, v in self._tmp_extras_require.items() - ) - - def _clean_req(self, req): - """ - Given a Requirement, remove environment markers and return it.
- """ - req.marker = None - return req - - def _finalize_license_files(self): - """Compute names of all license files which should be included.""" - license_files: Optional[List[str]] = self.metadata.license_files - patterns: List[str] = license_files if license_files else [] - - license_file: Optional[str] = self.metadata.license_file - if license_file and license_file not in patterns: - patterns.append(license_file) - - if license_files is None and license_file is None: - # Default patterns match the ones wheel uses - # See https://wheel.readthedocs.io/en/stable/user_guide.html - # -> 'Including license files in the generated wheel file' - patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*') - - self.metadata.license_files = list( - unique_everseen(self._expand_patterns(patterns)) - ) - - @staticmethod - def _expand_patterns(patterns): - """ - >>> list(Distribution._expand_patterns(['LICENSE'])) - ['LICENSE'] - >>> list(Distribution._expand_patterns(['setup.cfg', 'LIC*'])) - ['setup.cfg', 'LICENSE'] - """ - return ( - path - for pattern in patterns - for path in sorted(iglob(pattern)) - if not path.endswith('~') and os.path.isfile(path) - ) - - # FIXME: 'Distribution._parse_config_files' is too complex (14) - def _parse_config_files(self, filenames=None): # noqa: C901 - """ - Adapted from distutils.dist.Distribution.parse_config_files, - this method provides the same functionality in subtly-improved - ways. - """ - from configparser import ConfigParser - - # Ignore install directory options if we have a venv - ignore_options = ( - [] - if sys.prefix == sys.base_prefix - else [ - 'install-base', - 'install-platbase', - 'install-lib', - 'install-platlib', - 'install-purelib', - 'install-headers', - 'install-scripts', - 'install-data', - 'prefix', - 'exec-prefix', - 'home', - 'user', - 'root', - ] - ) - - ignore_options = frozenset(ignore_options) - - if filenames is None: - filenames = self.find_config_files() - - if DEBUG: - self.announce("Distribution.parse_config_files():") - - parser = ConfigParser() - parser.optionxform = str - for filename in filenames: - with io.open(filename, encoding='utf-8') as reader: - if DEBUG: - self.announce(" reading {filename}".format(**locals())) - parser.read_file(reader) - for section in parser.sections(): - options = parser.options(section) - opt_dict = self.get_option_dict(section) - - for opt in options: - if opt == '__name__' or opt in ignore_options: - continue - - val = parser.get(section, opt) - opt = self.warn_dash_deprecation(opt, section) - opt = self.make_option_lowercase(opt, section) - opt_dict[opt] = (filename, val) - - # Make the ConfigParser forget everything (so we retain - # the original filenames that options come from) - parser.__init__() - - if 'global' not in self.command_options: - return - - # If there was a "global" section in the config file, use it - # to set Distribution options. - - for (opt, (src, val)) in self.command_options['global'].items(): - alias = self.negative_opt.get(opt) - if alias: - val = not strtobool(val) - elif opt in ('verbose', 'dry_run'): # ugh! 
- val = strtobool(val) - - try: - setattr(self, alias or opt, val) - except ValueError as e: - raise DistutilsOptionError(e) from e - - def warn_dash_deprecation(self, opt, section): - if section in ( - 'options.extras_require', - 'options.data_files', - ): - return opt - - underscore_opt = opt.replace('-', '_') - commands = distutils.command.__all__ + self._setuptools_commands() - if ( - not section.startswith('options') - and section != 'metadata' - and section not in commands - ): - return underscore_opt - - if '-' in opt: - warnings.warn( - "Usage of dash-separated '%s' will not be supported in future " - "versions. Please use the underscore name '%s' instead" - % (opt, underscore_opt) - ) - return underscore_opt - - def _setuptools_commands(self): - try: - dist = pkg_resources.get_distribution('setuptools') - return list(dist.get_entry_map('distutils.commands')) - except pkg_resources.DistributionNotFound: - # during bootstrapping, distribution doesn't exist - return [] - - def make_option_lowercase(self, opt, section): - if section != 'metadata' or opt.islower(): - return opt - - lowercase_opt = opt.lower() - warnings.warn( - "Usage of uppercase key '%s' in '%s' will be deprecated in future " - "versions. Please use lowercase '%s' instead" - % (opt, section, lowercase_opt) - ) - return lowercase_opt - - # FIXME: 'Distribution._set_command_options' is too complex (14) - def _set_command_options(self, command_obj, option_dict=None): # noqa: C901 - """ - Set the options for 'command_obj' from 'option_dict'. Basically - this means copying elements of a dictionary ('option_dict') to - attributes of an instance ('command'). - - 'command_obj' must be a Command instance. If 'option_dict' is not - supplied, uses the standard option dictionary for this command - (from 'self.command_options'). - - (Adopted from distutils.dist.Distribution._set_command_options) - """ - command_name = command_obj.get_command_name() - if option_dict is None: - option_dict = self.get_option_dict(command_name) - - if DEBUG: - self.announce(" setting options for '%s' command:" % command_name) - for (option, (source, value)) in option_dict.items(): - if DEBUG: - self.announce(" %s = %s (from %s)" % (option, value, source)) - try: - bool_opts = [translate_longopt(o) for o in command_obj.boolean_options] - except AttributeError: - bool_opts = [] - try: - neg_opt = command_obj.negative_opt - except AttributeError: - neg_opt = {} - - try: - is_string = isinstance(value, str) - if option in neg_opt and is_string: - setattr(command_obj, neg_opt[option], not strtobool(value)) - elif option in bool_opts and is_string: - setattr(command_obj, option, strtobool(value)) - elif hasattr(command_obj, option): - setattr(command_obj, option, value) - else: - raise DistutilsOptionError( - "error in %s: command '%s' has no such option '%s'" - % (source, command_name, option) - ) - except ValueError as e: - raise DistutilsOptionError(e) from e - - def parse_config_files(self, filenames=None, ignore_option_errors=False): - """Parses configuration files from various levels - and loads configuration. 
- - """ - self._parse_config_files(filenames=filenames) - - parse_configuration( - self, self.command_options, ignore_option_errors=ignore_option_errors - ) - self._finalize_requires() - self._finalize_license_files() - - def fetch_build_eggs(self, requires): - """Resolve pre-setup requirements""" - resolved_dists = pkg_resources.working_set.resolve( - pkg_resources.parse_requirements(requires), - installer=self.fetch_build_egg, - replace_conflicting=True, - ) - for dist in resolved_dists: - pkg_resources.working_set.add(dist, replace=True) - return resolved_dists - - def finalize_options(self): - """ - Allow plugins to apply arbitrary operations to the - distribution. Each hook may optionally define a 'order' - to influence the order of execution. Smaller numbers - go first and the default is 0. - """ - group = 'setuptools.finalize_distribution_options' - - def by_order(hook): - return getattr(hook, 'order', 0) - - defined = pkg_resources.iter_entry_points(group) - filtered = itertools.filterfalse(self._removed, defined) - loaded = map(lambda e: e.load(), filtered) - for ep in sorted(loaded, key=by_order): - ep(self) - - @staticmethod - def _removed(ep): - """ - When removing an entry point, if metadata is loaded - from an older version of Setuptools, that removed - entry point will attempt to be loaded and will fail. - See #2765 for more details. - """ - removed = { - # removed 2021-09-05 - '2to3_doctests', - } - return ep.name in removed - - def _finalize_setup_keywords(self): - for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): - value = getattr(self, ep.name, None) - if value is not None: - ep.require(installer=self.fetch_build_egg) - ep.load()(self, ep.name, value) - - def get_egg_cache_dir(self): - egg_cache_dir = os.path.join(os.curdir, '.eggs') - if not os.path.exists(egg_cache_dir): - os.mkdir(egg_cache_dir) - windows_support.hide_file(egg_cache_dir) - readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt') - with open(readme_txt_filename, 'w') as f: - f.write( - 'This directory contains eggs that were downloaded ' - 'by setuptools to build, test, and run plug-ins.\n\n' - ) - f.write( - 'This directory caches those eggs to prevent ' - 'repeated downloads.\n\n' - ) - f.write('However, it is safe to delete this directory.\n\n') - - return egg_cache_dir - - def fetch_build_egg(self, req): - """Fetch an egg needed for building""" - from setuptools.installer import fetch_build_egg - - return fetch_build_egg(self, req) - - def get_command_class(self, command): - """Pluggable version of get_command_class()""" - if command in self.cmdclass: - return self.cmdclass[command] - - eps = pkg_resources.iter_entry_points('distutils.commands', command) - for ep in eps: - ep.require(installer=self.fetch_build_egg) - self.cmdclass[command] = cmdclass = ep.load() - return cmdclass - else: - return _Distribution.get_command_class(self, command) - - def print_commands(self): - for ep in pkg_resources.iter_entry_points('distutils.commands'): - if ep.name not in self.cmdclass: - # don't require extras as the commands won't be invoked - cmdclass = ep.resolve() - self.cmdclass[ep.name] = cmdclass - return _Distribution.print_commands(self) - - def get_command_list(self): - for ep in pkg_resources.iter_entry_points('distutils.commands'): - if ep.name not in self.cmdclass: - # don't require extras as the commands won't be invoked - cmdclass = ep.resolve() - self.cmdclass[ep.name] = cmdclass - return _Distribution.get_command_list(self) - - def include(self, **attrs): - """Add 
items to distribution that are named in keyword arguments - - For example, 'dist.include(py_modules=["x"])' would add 'x' to - the distribution's 'py_modules' attribute, if it was not already - there. - - Currently, this method only supports inclusion for attributes that are - lists or tuples. If you need to add support for adding to other - attributes in this or a subclass, you can add an '_include_X' method, - where 'X' is the name of the attribute. The method will be called with - the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' - will try to call 'dist._include_foo({"bar":"baz"})', which can then - handle whatever special inclusion logic is needed. - """ - for k, v in attrs.items(): - include = getattr(self, '_include_' + k, None) - if include: - include(v) - else: - self._include_misc(k, v) - - def exclude_package(self, package): - """Remove packages, modules, and extensions in named package""" - - pfx = package + '.' - if self.packages: - self.packages = [ - p for p in self.packages if p != package and not p.startswith(pfx) - ] - - if self.py_modules: - self.py_modules = [ - p for p in self.py_modules if p != package and not p.startswith(pfx) - ] - - if self.ext_modules: - self.ext_modules = [ - p - for p in self.ext_modules - if p.name != package and not p.name.startswith(pfx) - ] - - def has_contents_for(self, package): - """Return true if 'exclude_package(package)' would do something""" - - pfx = package + '.' - - for p in self.iter_distribution_names(): - if p == package or p.startswith(pfx): - return True - - def _exclude_misc(self, name, value): - """Handle 'exclude()' for list/tuple attrs without a special handler""" - if not isinstance(value, sequence): - raise DistutilsSetupError( - "%s: setting must be a list or tuple (%r)" % (name, value) - ) - try: - old = getattr(self, name) - except AttributeError as e: - raise DistutilsSetupError("%s: No such distribution setting" % name) from e - if old is not None and not isinstance(old, sequence): - raise DistutilsSetupError( - name + ": this setting cannot be changed via include/exclude" - ) - elif old: - setattr(self, name, [item for item in old if item not in value]) - - def _include_misc(self, name, value): - """Handle 'include()' for list/tuple attrs without a special handler""" - - if not isinstance(value, sequence): - raise DistutilsSetupError("%s: setting must be a list (%r)" % (name, value)) - try: - old = getattr(self, name) - except AttributeError as e: - raise DistutilsSetupError("%s: No such distribution setting" % name) from e - if old is None: - setattr(self, name, value) - elif not isinstance(old, sequence): - raise DistutilsSetupError( - name + ": this setting cannot be changed via include/exclude" - ) - else: - new = [item for item in value if item not in old] - setattr(self, name, old + new) - - def exclude(self, **attrs): - """Remove items from distribution that are named in keyword arguments - - For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from - the distribution's 'py_modules' attribute. Excluding packages uses - the 'exclude_package()' method, so all of the package's contained - packages, modules, and extensions are also excluded. - - Currently, this method only supports exclusion from attributes that are - lists or tuples. If you need to add support for excluding from other - attributes in this or a subclass, you can add an '_exclude_X' method, - where 'X' is the name of the attribute. The method will be called with - the value passed to 'exclude()'. 
So, 'dist.exclude(foo={"bar":"baz"})' - will try to call 'dist._exclude_foo({"bar":"baz"})', which can then - handle whatever special exclusion logic is needed. - """ - for k, v in attrs.items(): - exclude = getattr(self, '_exclude_' + k, None) - if exclude: - exclude(v) - else: - self._exclude_misc(k, v) - - def _exclude_packages(self, packages): - if not isinstance(packages, sequence): - raise DistutilsSetupError( - "packages: setting must be a list or tuple (%r)" % (packages,) - ) - list(map(self.exclude_package, packages)) - - def _parse_command_opts(self, parser, args): - # Remove --with-X/--without-X options when processing command args - self.global_options = self.__class__.global_options - self.negative_opt = self.__class__.negative_opt - - # First, expand any aliases - command = args[0] - aliases = self.get_option_dict('aliases') - while command in aliases: - src, alias = aliases[command] - del aliases[command] # ensure each alias can expand only once! - import shlex - - args[:1] = shlex.split(alias, True) - command = args[0] - - nargs = _Distribution._parse_command_opts(self, parser, args) - - # Handle commands that want to consume all remaining arguments - cmd_class = self.get_command_class(command) - if getattr(cmd_class, 'command_consumes_arguments', None): - self.get_option_dict(command)['args'] = ("command line", nargs) - if nargs is not None: - return [] - - return nargs - - def get_cmdline_options(self): - """Return a '{cmd: {opt:val}}' map of all command-line options - - Option names are all long, but do not include the leading '--', and - contain dashes rather than underscores. If the option doesn't take - an argument (e.g. '--quiet'), the 'val' is 'None'. - - Note that options provided by config files are intentionally excluded. - """ - - d = {} - - for cmd, opts in self.command_options.items(): - - for opt, (src, val) in opts.items(): - - if src != "command line": - continue - - opt = opt.replace('_', '-') - - if val == 0: - cmdobj = self.get_command_obj(cmd) - neg_opt = self.negative_opt.copy() - neg_opt.update(getattr(cmdobj, 'negative_opt', {})) - for neg, pos in neg_opt.items(): - if pos == opt: - opt = neg - val = None - break - else: - raise AssertionError("Shouldn't be able to get here") - - elif val == 1: - val = None - - d.setdefault(cmd, {})[opt] = val - - return d - - def iter_distribution_names(self): - """Yield all packages, modules, and extension names in distribution""" - - for pkg in self.packages or (): - yield pkg - - for module in self.py_modules or (): - yield module - - for ext in self.ext_modules or (): - if isinstance(ext, tuple): - name, buildinfo = ext - else: - name = ext.name - if name.endswith('module'): - name = name[:-6] - yield name - - def handle_display_options(self, option_order): - """If there were any non-global "display-only" options - (--help-commands or the metadata display options) on the command - line, display the requested info and return true; else return - false. - """ - import sys - - if self.help_commands: - return _Distribution.handle_display_options(self, option_order) - - # Stdout may be StringIO (e.g. in tests) - if not isinstance(sys.stdout, io.TextIOWrapper): - return _Distribution.handle_display_options(self, option_order) - - # Don't wrap stdout if utf-8 is already the encoding. Provides - # workaround for #334. 
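# --- Editorial aside (not part of the original patch): the re-wrapping done
# below relies on io.TextIOWrapper to swap a text stream's encoding. A
# minimal sketch of the same technique, with io.BytesIO standing in for the
# detached stdout buffer (uncomment to run):
#
#     import io
#     buf = io.BytesIO()
#     out = io.TextIOWrapper(buf, encoding='utf-8')
#     out.write('café')
#     out.flush()
#     assert buf.getvalue() == 'café'.encode('utf-8')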
- if sys.stdout.encoding.lower() in ('utf-8', 'utf8'): - return _Distribution.handle_display_options(self, option_order) - - # Print metadata in UTF-8 no matter the platform - encoding = sys.stdout.encoding - errors = sys.stdout.errors - newline = sys.platform != 'win32' and '\n' or None - line_buffering = sys.stdout.line_buffering - - sys.stdout = io.TextIOWrapper( - sys.stdout.detach(), 'utf-8', errors, newline, line_buffering - ) - try: - return _Distribution.handle_display_options(self, option_order) - finally: - sys.stdout = io.TextIOWrapper( - sys.stdout.detach(), encoding, errors, newline, line_buffering - ) - - -class DistDeprecationWarning(SetuptoolsDeprecationWarning): - """Class for warning about deprecations in dist in - setuptools. Not ignored by default, unlike DeprecationWarning.""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/errors.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/errors.py deleted file mode 100755 index f4d35a6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/errors.py +++ /dev/null @@ -1,40 +0,0 @@ -"""setuptools.errors - -Provides exceptions used by setuptools modules. -""" - -from distutils import errors as _distutils_errors -from distutils.errors import DistutilsError - - -class RemovedCommandError(DistutilsError, RuntimeError): - """Error used for commands that have been removed in setuptools. - - Since ``setuptools`` is built on ``distutils``, simply removing a command - from ``setuptools`` will make the behavior fall back to ``distutils``; this - error is raised if a command exists in ``distutils`` but has been actively - removed in ``setuptools``. - """ - - -# Re-export errors from distutils to facilitate the migration to PEP632 - -ByteCompileError = _distutils_errors.DistutilsByteCompileError -CCompilerError = _distutils_errors.CCompilerError -ClassError = _distutils_errors.DistutilsClassError -CompileError = _distutils_errors.CompileError -ExecError = _distutils_errors.DistutilsExecError -FileError = _distutils_errors.DistutilsFileError -InternalError = _distutils_errors.DistutilsInternalError -LibError = _distutils_errors.LibError -LinkError = _distutils_errors.LinkError -ModuleError = _distutils_errors.DistutilsModuleError -OptionError = _distutils_errors.DistutilsOptionError -PlatformError = _distutils_errors.DistutilsPlatformError -PreprocessError = _distutils_errors.PreprocessError -SetupError = _distutils_errors.DistutilsSetupError -TemplateError = _distutils_errors.DistutilsTemplateError -UnknownFileError = _distutils_errors.UnknownFileError - -# The root error class in the hierarchy -BaseError = _distutils_errors.DistutilsError diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extension.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extension.py deleted file mode 100755 index f696c9c..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extension.py +++ /dev/null @@ -1,55 +0,0 @@ -import re -import functools -import distutils.core -import distutils.errors -import distutils.extension - -from .monkey import get_unpatched - - -def _have_cython(): - """ - Return True if Cython can be imported. 
- """ - cython_impl = 'Cython.Distutils.build_ext' - try: - # from (cython_impl) import build_ext - __import__(cython_impl, fromlist=['build_ext']).build_ext - return True - except Exception: - pass - return False - - -# for compatibility -have_pyrex = _have_cython - -_Extension = get_unpatched(distutils.core.Extension) - - -class Extension(_Extension): - """Extension that uses '.c' files in place of '.pyx' files""" - - def __init__(self, name, sources, *args, **kw): - # The *args is needed for compatibility as calls may use positional - # arguments. py_limited_api may be set only via keyword. - self.py_limited_api = kw.pop("py_limited_api", False) - super().__init__(name, sources, *args, **kw) - - def _convert_pyx_sources_to_lang(self): - """ - Replace sources with .pyx extensions to sources with the target - language extension. This mechanism allows language authors to supply - pre-converted sources but to prefer the .pyx sources. - """ - if _have_cython(): - # the build has Cython, so allow it to compile the .pyx files - return - lang = self.language or '' - target_ext = '.cpp' if lang.lower() == 'c++' else '.c' - sub = functools.partial(re.sub, '.pyx$', target_ext) - self.sources = list(map(sub, self.sources)) - - -class Library(Extension): - """Just like a regular Extension, but built as a library instead""" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extern/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extern/__init__.py deleted file mode 100755 index baca1af..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/extern/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -import importlib.util -import sys - - -class VendorImporter: - """ - A PEP 302 meta path importer for finding optionally-vendored - or otherwise naturally-installed packages from root_name. - """ - - def __init__(self, root_name, vendored_names=(), vendor_pkg=None): - self.root_name = root_name - self.vendored_names = set(vendored_names) - self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') - - @property - def search_path(self): - """ - Search first the vendor package then as a natural package. - """ - yield self.vendor_pkg + '.' - yield '' - - def _module_matches_namespace(self, fullname): - """Figure out if the target module is vendored.""" - root, base, target = fullname.partition(self.root_name + '.') - return not root and any(map(target.startswith, self.vendored_names)) - - def load_module(self, fullname): - """ - Iterate over the search path to locate and load fullname. - """ - root, base, target = fullname.partition(self.root_name + '.') - for prefix in self.search_path: - try: - extant = prefix + target - __import__(extant) - mod = sys.modules[extant] - sys.modules[fullname] = mod - return mod - except ImportError: - pass - else: - raise ImportError( - "The '{target}' package is required; " - "normally this is bundled with this package so if you get " - "this warning, consult the packager of your " - "distribution.".format(**locals()) - ) - - def create_module(self, spec): - return self.load_module(spec.name) - - def exec_module(self, module): - pass - - def find_spec(self, fullname, path=None, target=None): - """Return a module spec for vendored names.""" - return ( - importlib.util.spec_from_loader(fullname, self) - if self._module_matches_namespace(fullname) else None - ) - - def install(self): - """ - Install this importer into sys.meta_path if not already present. 
- """ - if self not in sys.meta_path: - sys.meta_path.append(self) - - -names = 'packaging', 'pyparsing', 'ordered_set', 'more_itertools', -VendorImporter(__name__, names, 'setuptools._vendor').install() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/glob.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/glob.py deleted file mode 100755 index 87062b8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/glob.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -Filename globbing utility. Mostly a copy of `glob` from Python 3.5. - -Changes include: - * `yield from` and PEP3102 `*` removed. - * Hidden files are not ignored. -""" - -import os -import re -import fnmatch - -__all__ = ["glob", "iglob", "escape"] - - -def glob(pathname, recursive=False): - """Return a list of paths matching a pathname pattern. - - The pattern may contain simple shell-style wildcards a la - fnmatch. However, unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns. - - If recursive is true, the pattern '**' will match any files and - zero or more directories and subdirectories. - """ - return list(iglob(pathname, recursive=recursive)) - - -def iglob(pathname, recursive=False): - """Return an iterator which yields the paths matching a pathname pattern. - - The pattern may contain simple shell-style wildcards a la - fnmatch. However, unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns. - - If recursive is true, the pattern '**' will match any files and - zero or more directories and subdirectories. - """ - it = _iglob(pathname, recursive) - if recursive and _isrecursive(pathname): - s = next(it) # skip empty string - assert not s - return it - - -def _iglob(pathname, recursive): - dirname, basename = os.path.split(pathname) - glob_in_dir = glob2 if recursive and _isrecursive(basename) else glob1 - - if not has_magic(pathname): - if basename: - if os.path.lexists(pathname): - yield pathname - else: - # Patterns ending with a slash should match only directories - if os.path.isdir(dirname): - yield pathname - return - - if not dirname: - yield from glob_in_dir(dirname, basename) - return - # `os.path.split()` returns the argument itself as a dirname if it is a - # drive or UNC path. Prevent an infinite recursion if a drive or UNC path - # contains magic characters (i.e. r'\\?\C:'). - if dirname != pathname and has_magic(dirname): - dirs = _iglob(dirname, recursive) - else: - dirs = [dirname] - if not has_magic(basename): - glob_in_dir = glob0 - for dirname in dirs: - for name in glob_in_dir(dirname, basename): - yield os.path.join(dirname, name) - - -# These 2 helper functions non-recursively glob inside a literal directory. -# They return a list of basenames. `glob1` accepts a pattern while `glob0` -# takes a literal basename (so it only has to check for its existence). - - -def glob1(dirname, pattern): - if not dirname: - if isinstance(pattern, bytes): - dirname = os.curdir.encode('ASCII') - else: - dirname = os.curdir - try: - names = os.listdir(dirname) - except OSError: - return [] - return fnmatch.filter(names, pattern) - - -def glob0(dirname, basename): - if not basename: - # `os.path.split()` returns an empty basename for paths ending with a - # directory separator. 'q*x/' should match only directories. 
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-32.exe b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-32.exe
deleted file mode 100644
index f8d3509653ba8f80ca7f3aa7f95616142ba83a94..0000000000000000000000000000000000000000
Binary files a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-32.exe and /dev/null differ
z6sDv`tJK?*>s-Y>{m-sl^uIj!M`$2CF$ekQ=>1SvPe0Vd7mnB{6+4Ahv*G>KaOA*V zB`Hjx92sL65Bt_yp(V2|l{(Y3hQ>un&^l42UYA^#l_I@?^{bHm=&s1yk?>#o5*Dqp zY<-4*=}TDj_-E-$%ypbuUQ=GrhS4l*M{Jf+U!A*{y%^NF`DTb#z$|ubyEOyqW9FAs z8E4ei&t+Gpy4;$Hs_WG(t=C`&^D6aV^xSe{>TNbj)L&9lR?STQ3rV%0wk%Lxeg+$} zXS4r8=s&C68uqSc)w3vDtmBlS#E#{UUmfQ9Z9_36t;K zrRsAjil9t;R$wB4fTQGDC-J(TTH3DqWtWMo>5=U zy36g_?X70VQ(dIXQYa);MdJ3(DnxDzSX%QR210-;`^=0zo-Q1OO|06%B zf@8#(uhz!QuPQ5_RasJBR9hfB$upN3z5Ul*K17clcK89%WZzXw->!^)`VblHJ9t9nIg1XybYSeajDsXxDt}f%Nu5PAsGbqsUGAdV2r<;# zy+cuMkJa*o&eGP1H|ua8!gNah`C2K%YR+n(@Q36cVKa4)MZc;m!Oo{KE&FYhkxd58Oe z^&5g?FP=HCq`pd&HN0we?wqr8^I4xOt7d_-GI|aw29hrA$%<2UPKEV;g3!XQKxv~& zJuTR4Bn+5yVF3LaX!hUr+na0YV@1-7ydSnpk{tPZY$!6e9vlqEvsK7$Wg(q41uH9|xbL+k9GYfJgMgJPnqxnbtqz_;TUbk(* zA=-Aw0MmJ5d6Ibg@yG%CIG#ivrwzqV-UU7RmcSGFCh1CCfi50NU%DpoOW|P|K|kU@ znn(OkyDC-I{m)*^nLG}|G(b<5fn&1=FiH_eazoK z0-OK&G>@&EVc~LY<$(WrT>nuy9+L%Zsq&aC;QmKp^iNIq|8AU*2iKRk!Z_MqHj1jT+uf`1W7D_A9sb`G~)(4q09v8$R?M!+Y)U z4-KNxDkevJ4#jm;5C9hrf+N2}Hzqsekyqd-U zy8Rv@w$px3zsbtyzYEfw^*rDunfGzuA6YZbQR{l{PH4VWAB@Hq5r+6dR7UVX_UC4f`{Ji?lf*e55^&x2mE0 zug7lJ)iW(R{a4{i`xogi1P948f{XA+q>T#_jZDzwTh}L6KTtTgNWA~kze3-CE&g7c z9`4B&J^J=fecxqVkzWLgTiSdM&jmcvUT@%ei037q&v<0}GK=SIo<&l4evx>nMk$%g zF5$VJ=Ruwqc|PSyChP>B0v@rh`~So5?`fAu_4!5Hzew4$`&sprWy7&Hblb2uuSMeg zKMm(;3GWG;1>8Y&*Qic0v9nTPLFda&U~v27!WH5I27l&RTGck z&gW z&A@qdl2dRm0Jie@a9<02g-6oa13YPhQu+9w0{kscG46YTKcUkwaBl#vLZ|XZ+|59_ z`%dVy1=#rm#sK{H0k1fny6f*yj{{%l5qt!GW4i^;^jP`&fcNuAUIHh3iGzCz@KM|S zIM6rK;wyoxcoIp!88~GY`;oW>{*LE1I9>I#7NjTz&UwVxr%_hcsdCG4KVpEiw*)WmePdYLxGd#S!FcNV<(75 z%J>Y>JD)ltd@*nhkAz!u_-mf^l0I+?kL0xjczu~g+bzJ;E~Wp$zYw^F=XKoI0ypspK3jmX zl~!ErLnycH7WgwB!RKb+(^XdeJ_Eeza>`CRHv_L@(Kj6)*Z@4EhC0IS2X5f;h(GYm zTC4@(E(SL9EWo`5I2rjv+Q<~(G9Kw4mIF82?%S?_{~IU^;RSBtk?_v|R~uGcHv{Jf zEcXK7r#y9p{~UPLVv9c;f%|zj;C~Q!-U|2z_X1$cN@#+6J@D2>>M@D>1zxaKEB zOlYPoxD$bSE#QwkANV#;Bkp&BXRn7ZaTfw#<=Kck^IG)AuY-SZCj%GoNZScTE&^_~-IdpKT{n-^g$Oj?zmfJun%Tf0kJRIOVB^mf89FrVz%8^A zIQS;ZoeA8^lTMf&z_WfqedEptF6WW(0<&+m@)B5h8~%h5cny!_wHA2uFQGGTfl0qY zh6H~%a2JoX>ki=ZJCs^W7=h|eD}8}?@!W`i2XNo7p$~3>r{7Iq0}dYG*586B?&0^K z>wK@3eiksuBY3U{Zs+mg#(s&4{+-3cF~B={q_4Xh_~+l#XA$Ogf%h{;;}-bC{{t`L zE(4zT0Qlfu0G#v)^GDoMfKTv9J+=W~-e|e^0M|Ya&V&(ofJgZ4An>Cntg$ciNn}VK z!E-6_z*g|beGqurcFG8D)xgVkL2GdX&+mXga9;@ggh%+{b70^_%0;~|1tz}?&iD(w zi$|9cxOg}11plSLM|dRPjliZ?!5RN%VDX#q3~qs4Jd(b^H{P;vHi7s2#iDZ;@CR?h zPt=Q?%aF4Y>!rN_<;=rN;3H6U`^7C#^!CLq@MYWm7Etu>#b2Q4$BSE_=&y@g;2E}C z;3c-Z0w_A+5=P)pZMW!ux7%)kqMt3e2^4*22`^CekHuf0=u!2bmiE2a|w diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-arm64.exe b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-arm64.exe deleted file mode 100644 index 5730f11d5c09df7758cfa1b1dfb5ced4bf2ce55c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 137728 zcmeFadwiVLb??2O8A*Od7h~(<8yAgiNJe8I*p@F!NHnq`$TkT@5@{|y$C6E&kqrsL zCc%(IGBoKJH!WkEkeJhB8^Vg^5}KeXZ@u9c5oyW-F7^@H|W-?ApIHvAs_ z-u1@Z+L>!U`h(wZ_R~(9*`^>c`BHO-Gm3}@D|tJ`=7^E%-3N@xGyLPvznb6USxDqB zV461hg)(JH^(yf%%9lyet$FYr$3*O1=HE2|Q&*6c5aT(|n3KP8%q3aL<8P<@)-g*g zpqV%Aj=9?db$nL)eD#LU-o1gl#?Mno0IBbpe)t8##xz}h=dH~fnvMCVuaiohCc$+B z*Q~!7Y;(1j&BSK$VhRoCbA9`~eBD>y;bpXaq_3use0k>rYU{i6v$uU=jj??<;J~=# zD?TsZQd0l#-@gL`Bg@R8sg8MYULck}wPxPIQ@y8Z0EwW;j7^nMEU!4}CW)K_k%4AoMX z@|?Z*Z&PQ3q-Wk+m}YzVY}+?DsgP^78}pH$sviIEM9_4G3!RTUm+bm@fM1Z`s&;eg zukuWzWB%~J!=_V0EvuKfq0koNl)o6AR(#3KvZ+OvS6p`SjM6EEmG2w(=U1G$##(n4 z#wzv|#VSmZ;2eE+!sk5KI;MK;efGPoa#>}@@|!}HtBaXKu9LsTw3-`Z{FF8h6ytWc z*RL|}&EUyT*(+%HGo%xKDdSgu>m0lN2y|q3kb@tv|c*aSt|Gil1 
zu20sczSOidwK2Xdb?+V5rJ5tLbj;MiAJ>!jI?61g>{9BeO`UG|`;Pxpb^1698s}h} z2@LJeH;wxpv!}wer&oo|Df-qqJ75}Dn<M?dhe)Iki7%8fyw;uQo%gHm*(3JRClH^;Gr)I+gs{_|8LWGxwb7GIrF06h0^3tV`zweS|k5`yLa$0r11+I(oERH@!PAGtbV(E$%Xh{ zcs-uMukc)P@{wWTHwdn?@!Ji4D~v;Xu^V;0T-TEi4_BPrHoS)Crm(qD*IT&?Uyp*X zi@-JT9}RfI|7M;f#+h5ufhjqG<0NoJpJ2r#CCjIA8TST18NFI8b;>HeTe zpN7|83p$-=f`QK0gTc;Y{PIRd9_|a4boK{Jc`oZb9-P+M!1%p|@%#IX-%l}qKg0O_ zFN|Nq_*ESkm|mHSV(@tT;WO`e&u^{&C~aO(+t&fZGGJMXOsGu(o0S{m$4vHfASZVB z>OxcXGWExD%;MddvGVAZv8}7RYfk+G{IqLB>_(@f_4BjM`!1X9n5yr_O!B+0e0i&6 z-L4HsN8ddRcnB$8JH?u zuigsyr9bE#{BDjzePjJzJv>sC2QTk7W{+TB`ljkPr#dK`4ZnqD(*3s2v#DQsOnRhI z?c7G*U(t61U9-m`^jUIWfkz+FCg%0;Rnevp8Mh0bb~@%++61y_vkQ6Sr~7niwsZ++ z=yHummqJtjUF3$~RJ-5J@o+v$Bl`PjX>7^T5f5I`Xb?Crnp{*^kRNg<<>lrCX;`$V ziJeCW3+HXwbjX7DKGDGhJvva%6?!p&IP4n!uEy z-QXpgZm_5QTSdQan_(K4U1p}lk2}XK4BA54clsmy8=MvMnqNw|q zmHc8(XsB<}&ROR2_D`$c;2*_?Bm9)F4;a=IcK^~PUlaDsIr*-OJrm(Kn$IO)Xo7t7 zKf>?4^8eZGFdicO+61TI&H|L@_s5FGya`?unI&4uus!RH0(^#$n?sc)kLemYv?rv(k}W*2Y_l-9Xu-#?|dFbB|2UhP@y8 zNSc+WVeD$|lWPL!j=c+wu?x}28_C}P#7`dF^W~rK_}dM){lSW_ANx-4E18#?2cEvy zr2c7=*5NF^Stj)r?nZmuszRAeFNFM?*Y^5K*L19+^O`DfN6bW`p#M9 zj%nRnymQv}uR0~KQTAA{G`gvvy}l2->87_w9)59Y)fTn?!rLQNFHAE_UZSq?`L55( zca%J#V5}>WYo=CyCAQ6_Kha#1Ty$@Acp-XJ{W%V<{C@OtuYL$O9qEyUeZiTX>3g&1 zQhzrgKF@%k4vWvSb8xxHDLdGEPkBR#w!=jxSpl6+<(loE%4x5EgEHxWsXm=&lIqh@ zaPVu!!MEtI>MCy?!`Wo)j`Gc>wHci4R6cMvhMSU3mp-r0;O1`n93a2y?0_CG=9!!0 zqF<@wl?-x4g0&fu%1vy|XRPR8Q1r!=L5H?0MSHZ!Y!M=k9vMPh4BFo+CEiGPS!cG^bqOG zL$R*7W72i=x~p44rYk>f5 zKUKaY1Rh)H`%7ykCy%cwrf+%4etb@vkKE1J9#{sC=a_{9YyRHyS`68IUS9BVyYiCf z1m#6H;Z<^CIsLYMg->_WW?lc$CCfwRkoXoE2QBmHZ~o9@89GMJ9$jL28hqq{gX6$4 z3e%ct$*UL_r@3m3w1fBe^L!sy$<^2C^KtO?3CHXeZPfq&4P2IP&6AUoeS4WF>D^z# z`=_VJx+KGf10@A+TWlK>iew4(NsdXDOaYHob4)UfOoQKBi-318ezf(Bm0e$t4ZFP8 zef!tV@HYA?-U*U6LSCgQ|1Fef?ArWq({^@####Ci=+ck;Pn{3f^lMhtub}$MP|DTjO7Lo>67iVvTXp#@3Z&rv;O1wC9!nK ziFM7r>$2iV@4Df&@W*n;Js5SIEwKu7s0kRu;NtAPJ)XY9Zt=?sZ$D)${!hnj|8l9> zp5lr<5&b}!Y$DU8a);p!_Y%|4f$pCd5KP!XyPf2x$-9$ZhxBZ~EZoI?7r%Do3VE=J z7AOCex|ObcTlxKpHgr{eb>wqO%=SOw`4B&J#6rkPcok-ws=JlC`~ z;RB7+w+MMU^7Fe=@N7jzK~g+pn$aJ?-$Xz9%G>Lg!WT++YV(uI7k1DK*iChrvO(Tg zalePQklWFp)82aE_3XD;x&vS9&J0|2$eZ=Z#orYF_+Y zR{36h8Gn{39|xu;%D)VJx00viTIdHX$m!@YWCgr`7`gvEV<-6$_)s*zlYF8Jc2!j) zwD<{S$yYCY{eXG}bBI1Dui91K0qMMDvFbYV|1J5I=j^@DcAP1QrB63JYx%8@^0nmG z6pt+pU}ob$Yq~)e)=yd(-bgO1Hs}>Z1A$33g3lixoy)h>i#M=Eu-{@E%bomn4u0e-KOF0d z_FTE8pfgclhrB6w3f9$PXVlFj&sA+*R9KkLK1Kh;;V^V9wqvF$X7uYP28!E3aWVBBNBe6k!Hwidc|NPbi} z$&rgpI&}7EZy)+PES-xEq%Y4`I!HJNh=LRWL=Y*bfor32=KZJ-^4nu?$6%)jHT58_>rB|XRi)6Q-GgrC+sHdrSf?hduWs27ie=6 zw#;K1ljhDk2iZI|G3)HTy_Vm(ZFc?~ZI)&bY2&Tx)LXk84NwU!%X(rWJl^tV?SQnkwOcY|MG`MEnT;z{7_{wS_z|4p^?n>;)R^(CRfMaGb zzg)KqIK&5boMdF#3dUgxwAz*zOMfhL|82(9n)SE$g>$#fG_h|x-gv|q!G#rt$&w0_borG#WkXA8^J(mz4cN+i(67B8sYNHtMyq+T9sS!DC>vsq*(f-? 
z^;u8$T(BK%Q`=7Se_lKCWA$Mp$H0ep%=cIM{WI_hw#TSVl*!-Khc6{NKX^FlR$}*v zukmC1e0!dsV2;LvCvV+z9#}Xs%4R(`-yev&G+tS>%KVFHIQqndMCp_*ll6xc03wO&t4YU(jP47l+3$q zc;w-x+~LKNvA3hQT>4)**~}2{^^iaQdTly^4c7GNMah5sJ!HD{FlimME&EHhnCM;s zp8J9`I%R*U-Pt^!M!tn@TP2-uzcKQ#(vOiRNE_&g&X;G$(l1Smr7Or2AMxaDntbZl z5M|Y8`P0A&KA2{_#uC|q~nBMW?o56C6Lwx)N}-Ifd)`kt6CyugW4DRM+3Rv3n$62I#?sShQ|s*V6S~Ob zgcDvABE5=vYZd&NKyH?VM&|~WH^rCuu`ExH*Pp)p3!8Fus6VoH7a;FrT5(_CneU*G5P&(WMW|H_iLuTxjAkkQ+AUjBsJ97xK?*G+!k{*cdXU!HkVWBj2Uv)7i}6iE7cn6qpR zgAZU?SVr9!Q@1k({l~rLeSUt;yZkawfmdsXQLY<$U@sTP?=f9x7`r8ubA9;EZO7_V zwV^RyNPlhJ6<#0YR}>uh&pRj74_S65^*?BON~JTOh}4eZU;td1nWn2e$IR=dzj`i# z4z?X&5l(!#cV)pXp4EIaSG2U_XKYTLZI_;=z9!Lv`um`Tt#|iW`>yaX%ku-M5B|K| z@aNOr!q{`I2fGurFfmW9cV636Z~G5i%&DHQ`_xU=pMZ@wb@`_*>KYH9&rhcte&nrh zlt|BbJ`VBBkx$>$bp)BL_DX3_u$EN>lhc`7$X}Ow=V)(J{q#rFW-obZ=jH>SzNl*f zb3@@n@V|jQ>gnnI)@D*aeh4q?`f@O*^Ck4zabiA>q0hue7!0Ko!DD$aU`|~=BlhMS zlzTmxEBm$ahD+sxYp-{W=?ZY)%{>fW+7WQJ)Z~~aOQ-JKaT0{?TIHo$4} zOTadOFEaw(PXtQPKbiX99BHg;IlV;nFXyWIpQcZ}|{rszmndrrzuX>e7 zb*s+jvg%xltRG+JX!$or8rvxA^YTP=9HkR+c0S*TPKx6>&HBYHQ_Y?#ep}|pQf-bo zHHhuLlj{Ii*_5@|8;SSTrm%lbov>-%yH&hvzDVzuPWW!qWW8H9;k(^MrV$^);n$(L z`ki1bRN`xA9VfXNUxd=i2U!b}o&SNdc%z+I3+TKjF`0GCAUsU0LnXZ7!}bHan)d?WBvJN8Xq@hxw%=G$oN!cICK@aj4M-Tk^csH=TeERC;mPXb$W ze_*<8FZH_eOt-k$NgnAdNvcg>DTj3@@%Fa3k&u5u!y%pl=X2hJ=j3r-pqKO z%)Q35l_R6^m}7Dz#Qafpx;bNYhTKeAVOH9dOd2}fZ##rNV9R0$_-Vd8JbUl=?R>F@ zxqm-%_T<6lRny!3vP9zb5MBG0jM(}0a%?S|VAkKDkO_X8KRHo`md zHz?hvVaM+zZS;ArNq@ll-~QgpMt=--NG~^ga$T}@0QqaulRUe4)424rlhVGd_2IYq zn`=hr!S;F6*z@Uzto3Z-WBqo~O()~ng>m$_8}s};YIZKkfOFP53jYZ*gr= zQu=)wzYWwgE7X3tntZ-~YkJlseP=RT85Wb%z9)lu>kQ9 zb4g@-9ph*tx|B7PUqSfXvqSLfQd-ukrQae+rZnfwCmn2P*Igj?&^ z&a$iSVNLU{%RC!IdQo}5{ev^GAxo;$>Pn;d3UmOyn(1 zFBon+5{JHRfhngLW5g*eO}7QyA<`Pd1=8CB zf%F2>sr&FCX_Ns*=Kai9d|$WVT;-b9Ze;kXLeqNPj2rI>>%PjH@hpSV=M z@cgx!XG>-a4&RsJkDJgnOC=NbhMJrt{C`;Xl6&RSw0pMphvzC7?aB)BL#z>EuT5zc;X?xkU zena>xD=G+GYNpO5&AmL9E8W>tdm_8xb?k+s#8-$P7K3B)ta##M%-ztX#U1cz2p^z)$B}u3^8HuYG}a{! zpaXXEp7@Js*A@75v8{JmzGDu9?YkM7C0c%qYlymPm6lSC zv+Uy6fD;?3Rcq>k^GpWL+u=FsJJw0DBc^p;thfkdspec`ZI!MYA~x$ncJVp$1nO^F zZK|;a_7EGGivNX4X{%rOcOQL)`}r;(4)j8DkGWS<)f>05WFnU(}}L!&cra zNo&DAS;>_-cRI>%a1J_PB#;_eX}U)C=M0Z5%N;&JTO<3;#*y`;9dW8hY69tzWr6g- z4T1E~Q^C~8ww$g6c7xV+`j#zCADyLmi&(YtbZ8zFWZcll(8kM(m(w?`_bR^XVPKRz z0sds)tHgycPdVLCi!L0qFCxfGVvo`;_PgM_k(dD&+iehgZUA3N4SpEW*2UgeS{$3a zAO{~jHeWoG_tzIAA1_4)l{q)pI7MrlH13H3QQj8DXOw)adOVviN{ol@iJ@wY@Q!tx zbyrSJ?1-0jFG*Z#Y%Gq({#C$L2u%NDb1Y4I!+`_wB{dH@t=-i9J>V<^9;LVZB7S#j zWcuRKJpE2m^+(Dc25B(LfFt5-zZ0Ci( z+!@n@o{o_|*aj_M$L`;T3^_);hInEdJa-pkfO$mMQuKuE(Eeq_F`$cxDLM2VbSLts zOExWd+51z4yW3n4wvi8a?(E2XwvTLDs-%hq) zc+3wjxSAF%O^5T$o*ZHgT&*$XK`+`#LHG8 z`!(F}OnI?l&4<0%Cg}Wdl=QXC5e9<9!{o-cMxFdEEAh{-D>mudHm!Q*wne7vAMi8B z=f}G6i>_SDbI7@De#?B*6`^mEQ*9qKU9&Rvi|3nj?ki7^EEIi2oALDQKQ+?W1l^vd&cAx* zZ+m|Bi=p3Suaj6=27knnyQ&L1?!7t-&x12OJl_o*nQsH?#J_p zfk!GyzYs0o3y+95#3Qm9RsX+*M?C&8qx_-$FzV~9EdKaxhCe!9J)b`|zYBlVzUuKu z_BeX{Z+_JM;?DW_c)KjXc=jC$hmYZJ5` zZyzb0@vAlGJS0sob5Iqww3PzHapJIOD%SsV;_Tq8yJti3=DW<)7tg# zr5_p(>IZ(p3Hs4|PCo|F`~KMZ4zYj}!E$*PEJvq%eqiBYJS_fvI|M(~%yeulH##q! 
zD4el)5d4R(G+jS~&TPVOU|f{eR1?dLK2V-`!HmTrbgA;VS?%r5wAU!Sv%VZbU)Q9Z z)>l}gYUfvT1@-}aSu@XRjp=$b{wC_I+3mDy9WaEyB!ccQ=Xp0Y#r{aTg(h7IJRhdc zwo_X7D6lsm#zxK&KcXkRIx-Pg^$7rk1zhJeYZo5o^!IQW7ypie*5 z`Amp?F~0rf>r|LzyLeSP=g*;YMdm&hG}|xB+_S#DU3QRcq>yuowT)`wL;0~(?n~66 z`C1F_BW1aYZ>x$pGv~*#q41s6zYf1@o!h~_dyRXQQ`}sL=Q!_MW>2ad=2>;UmeF-} zMabGfZFO&cbxQh8+Sw`{ilt) zX_;}?OH8xIU0uQdzsB7XczU97Gz?ze%Q%W;jiWBgyq9rw@p~9Y8{syMBVDukSkLeE zCq5pZSK~-&8b{;b`Oj)k_iCTZdF?CB&+A{a$C01rx36*Jr=35JJiokb^3l0i=-m1G zU7J=7pJBal4>tN9uB$&1>yq8ugb!{$`xG|3&!kfU;)Ss_J0$avO>u1E%C9&LvXwfC zlc;m1C*$N>&NY(Rzoax|@6b#46X9Uy5yvPS`*m`7i74%AM-qcWGj+ z=lL`-rSvPJa`Ww+ey{J>k6-Qi^+R{%-Yg$O3;83tOVi9PPeq86Q-3dn(>pe;db^qW zPL`Onw7oYcvsR!wb66|bOcD=(#*f}SsZp&=)d~SBk`a)kTFkPRe zPk#tL27)I21=d8Y9znNCkH`;_KnFELzoz#QvjqQ-;_6ZlSN-(gFS9u-@AJ@iEoD|> z*S9f0_4BQe?~49~oLSZ`(fjxn8N2%-{0Y=I81U@wxG{^{n1A?X*G+D(U&s6_wDFQ+ z`Fm7W`>0q0AKe4XzMq5f3{OCJ;+>v7C%)K$rz`l4pI2t{<`!rt{9C@npK5(k&bhq3 zX49&>MTbpU_|{xnxDJ5hTZOZwPV#m9H(~0qyp11B>($xxz7l#*RDXQa(Ip9dq28Dn z;}PlcY(6<}zU;@-WW`0;G0NC`Bj`jw2~V!N%Cts^aacYDeYd=G)W^5sS(8lQ(_Dnz z6~bPNg9GtF2>)9P{%XXGw#;9Y<7LqSo;+`w!R5ujBMHL{C?g+!9LE1c-N+#&&dCAe)_6sz6so$fj=T! zm9h_+;ioI%Q?22gA>Ki9;{kk+mL`l_)f0giYw1gZJR0|hi93A=o)8{F8)p@(tmM?u z**a5c2K#ZeMpM!W-w^jv)_FC&Ccfc5Izm}n?$SvOgR&v(z4*#F<48Wn8vHv<0vq+5 zk3EPF`vzot2zk*5Z?A>7#8cp)u_bsePpyr%cRdhbyaSJPtneh*;`QAdx!-8V%%jB0 z5bxuU5%IY4{1%vf_*sw}h2LK%e&yBQzB0()@Zc+hWA>e=m$WYMzyz>wfsYgUH&lkq z6UFQ4!zSckJNig#WeeH+vkiQ1fesq`j6dxk*_%QSMS7+s6TDl%RdDY9Dl~_#qUA?_ zGxD(dH<}myWb<-WKIPd(9%O7ZaRGhh`(@lxZ=KO)ow0*GzTae>;x}4rbJo zSny?h`|!cqvmf2oOU#11aqVzRS!`<|@d{T~6h0z5|F4`-GVwC8V(1^g?1jqA&6N2b zW#VUI)dS4Q9?Zi1$_(yRuV^EgA zzP`3PLY%n9hWxfa#;0=^W2cn59($k8H6msv$eIeWx_^$=AWZTB{90l5XdNS#` zrjO_JyN7jd*-?JKH4j#wRZjh0^Qea}H>)4to}eESt&3|tTx;W}Sr6ZyW2z1?o}-p0 z^M`tnvk~~sRsSTDozepN@#?T2!|>C?jL&js`cT=thj(ZV{aX0vIm(OoLQWZbWK7yV zi#PQQZ`x<-Oc2YnFED+moi=R0kZtl~sGU^6Eok9cV`bv_#fjsf(!`f8VBJ`KLSJ*n zMjm!?yUK90iqkeebTLWcXq(^>CmCNGE?79x%+gDwE#Hrqssc#p! zNC?lF*ayjs9T^xaSS#|MMV&VFMH4d!4 zZkWTpq3@C@OGifb46HH9{z*Bhfw}nU?`Q4rG1m4U%S{a&=el3_d8y+;CwYuB8T;or zN$trx{JQp{dV8x3e`oLgp(j^96X4u7;?#mB`Rfymnf=9wM4L0v2A(6%&aCSL&Vk=2?tqxS!I7NQ$U5k~Kaco?Ny8&SXXD5=o*M$yBR7$!A&`E3 z4(Hh&&rOXymD4qd9)n+^x3J#)&45|7o%p!*jMct|rRhDuYGV~B)5{rNC*XnXScOCQ z_XaLTFFK_|70As6Im3&0KD%Iu{ltwYd8b%m7DV@~#~#{;?w(0M{ruvojzH;_a@Ox9 z-?qOEO)_=in`~73{`;+2aCE>6DgB`}%_prs*A!ia?+muEZ@@k z5A8<})Hs15WG8!OoP%py`j>Rtd4gBo=0(>j5BS#^7O+vf-TPR1a@7*anaaLZOQZ)X zpI@~kj(=3PRAu+7C3V~zC$v&~Y9l6Ay`1MTKFm`*kDvEE3LPssTQ1Vlx5OUP)PjEd(9EL;te*j)(&zwK+P@Kc4Pdhq*&7@eN4a7Osp(l0*a_pK) zFY$@)^4fIh7whj1JC_nOFg8EC7@Z=2sA7i(h|~BL&)T1=veeh;N+wTG&SgK7Y@;G@ ziLd&weAwcH7l>=ju3z{&_nCR1KVLd0&dNj;Ptr{LlDF$Q0emfTZlx1Uu5t>7H0SJ= zUdgR}hVikkop#wrIkX8Hp$85R6q-F{$lASJ_f6ycSn>j&wPBi}QKZ%!vS%sfaeg81 zB)dLC`j@anzL;myrR)>hIh(kg5OXwQHs{uIh6(R$sH3u0JW*iJvjZPp%k#~QFBZ$b z39aje=VJU0guH!j?eN>?%zZa7qPy(=H^Y4>bN^a!TC@V5j!bEOWZRE%K+@%Sw+xcnSAIQsUFLQ6(%iP=cGWTk4H|=*!9+sY5U+r1}Xy7%&vUVslDH0VR${`8*B=yNap&73yP z+Vq2Q&Qyw)nPTmmj$ZZLj#c<~6a%WVwp`Je9h*Jxny3!VXH?Gu>RFGjQ9oouB}Vb$ z?N?L`Toe7Z<1-VBBe+#(K5!Md?e)unsRjAqUU^^#XEHzM!RoHxoc60aUz5=VzKIr#aptCI`K^YGi)V)uEm9Q)o! 
z90c-Jb+^4*zC<<$`3@1=U7w(Ag#3!7Xwta6!hzq z6A5l?DEu7VTAyM~*W=X$`y}vfO|^R|kOS(w=0htSv-kyMY9f!dXVNs@1+(Ngdb9Nm zcqyQc75F?6uvOG?GxXiV8lA;QcHg6{tGClGHX6POv#`9Kv#Mq+x3Rw~(H-h<6Xldg zxZKNmEI$6!<~cYgkB`IbHk6Ne$?1Xt+wwdZp5Walcy|<93nxB4#BXVI{FrYcbKcrH z``+H(sy&6QnMh7>b&>T0*f@D_&n7Xw(Ysh9=^)-+I${v~|AM-F+mX$e+mEoW*+5-o zVa^z1+-|zhBsssTUUla&|4#6Xt$Q*uI7$AnlMB)}q=lb2IIM#X+j##H_S(pZlRCb} zB&`mjtYEy7c%|3>xHdK0K2PiXAht18OFCy#mc5X6zr4fbkF!E%8@)9!4_D^2Z` zbFa8*>0ZUMX744)mT$sw;+w>i&<3#EIHITVCH)xuTYU>(Fh?&g1TNulDsw`{s!fx< zGawnf+oapSiLRu)`j5S4dEy;#Dw#iC{#`)NcWNA;0Cx4=m0UQ(IWW1KcL3Yyd1-Yz zkB+g2U2!-JKE5~EgYyV`fpx~>X6o3CJ*awGvdcN82d9(2hB~DW#m73Q?-9z|y$9G{ ztR*XMyYs6>^bx|r>pbt#5;c)8}5KM!cjNR zD)+Zsp}}F@|A2eF6E4QjL3QR7=WEsbysG>Szh%BKdVZ4UN9%>wCEsQbvf@*Lp^iTL zXLaeEu1r0}*oU3652>gAD(F~-kLoV$YOl{_o&S|Si|>)`dsb5q=U_$sdR_ttz*6j& zzm)PZXrX8E6$i`NzWp)%iPLA!Otkt>GF0z3d40#0Jb!;iHs7f)?r!>mKS_H+nxIJo zG*G{?aeF)UW#hJm=kxhV_U;&t+&}-__5S|2dK)IHH%nex-0#jIo}O<6Xm1ntVcgUG zhh;0xMIJCNsf4E`Dcym<2&!j20It7zd^=Q=kp=ts+D0Cx!Abol*V=mXU4PyV%0V1Yx>?;iJsd4 zZ`5RT=}vU$Yv|BEV&c>ASPXnf_v!a4<3{BYl#5>$vvKkX@Z^sP-;eCUbN)VLjnRlH zJ4j4eGJ1B|k$nMY==J*_*rD9C1){fhCW zafAJ+JsDjiJfA-=^3PH8;M3RzmXjT;udg1+GYczeSA5{k%ViJv&V=^HB;Y@6UwmI? zA@81-y}1%w)9TKYdpXBuQ=k(KANVO77`oom-HnftVEf{r%L z$xGhBntdYE9`!F&Jx)(Sax>R(Z9(!+;A@-4SuZxNzaaU?N@ENOMwN-shmO^`)d}_h zs6Y6}t1T|kh5jC|9Aa$5AJjDi+hv`929SRag*Tu6q4f_*#~%CRK;>%WxZ3_dc^(fu z99;nY=}?2bSUAniEXb(Vyk~1|JJn9$!z)(qq^W_hVNR=d=+#3~x%0r|k2oW&!fn9D_4@1NjGpIU`(Nul zYmcq=lB#~>)^_`@!h5GQz4PRh$FID9-q}6N?$r?OBAdLt@L=>9{S(itjFE3ed7X=s zvTJO;yT@SAn8N1SW7D(Fbdh}^{!*D;{`=;!a@k{{S3H;+o2OyN5i{oLIp+IboLuJl zD?G!)_WeEFi*G1z$H6~PPl7Sv&%f3&|I&ES{3vwg!5ze_J@-JS9nPIARXx&w@k~9w zuIWSmS7Iyp_UlW9hJ7sF+*z=?FD$qF)LhS|w6a0-O~1T5C3bW9T~qF?fR5$Y#9FD( zjw$!%_a{we91FkFFBfIC^IWDKjSto5qTg$##9A3gC$C}+s*ts)$*e(jY`krF66;V0 zDkeP=?T=N@eYv~(CTC`GH#UDwIMy2G8i@hJ#*6Mmr#04?+SW~^)e(Of>G#$EQ`GU? z6_XyRd)ZX$%sc;`cv^F_x|ya}c?F~Y&M&iyGGAb9V|{fzm6O}BN%t|fgxAvkOOvOOnJ+@mr`VUBKzI5!#DS&x$vCn| zbCzA;vxjH+y7O=5NWPl%dh!yJGkq7b_B+%$-ZnP;sSUR^H)-Wih7KOxyfJZ{u`S-$ zesKA#6X0Sqzk$CDRQ@C5Sv+g|lVv|h9(|OxJGWz5*9!8g?sBL6&gIVLS)4&QPqbLi z7~ zmuarGJn8kv(J$y_#%I^TS-&M` zmu-U&PlaOmWHxQv5pqhtm(Tfjw_rEz3zQ7aVT=WswEWlqcO!P? zUF;9jd*xG}_h;qlBv0uiv*%AImuwM?uQLX|F}Z9DK8wS@4NmUta3+u1M}lhr`v}_Y z;oPX?D_=9i_`R*KQe_9wy_@M*K0c@w$Un&$ba7g~v6b}mpkva?r}xUYoIEGMQ}5LW zb`eDEtdgwlgkuj?r9buR7I7(dkMjv-Zh7`_a>CGs^vq;8C>HxV+H0UH!rpV_No{ ze98Wqa*}I($XjclWMrGxzGTnHev!L(T_IQdWIg#SeM&peyTIj! 
zO^VnD3!7w*#kHlJVy}Tv+*^<#*2RkLquFdRh7*%CtQlOxx%0FH0x& z{C)fUMANG`q`#r(S10NH6HTw*klw84*T>EpnW%pW@|xOWg*(|3+4X#Ef_k$1^98N(vj6Bbb!6r;54xGNn>8jK>SK?QXHVPn zF#YwhbI!%++K7KnH2i|%!-20Iy|lwin?Nsd&N%+f(eDe+8S~M< zDP;S9hi`mc{weO^gRSp2PUG?mzcW(x`|q%3%lLM=$|v4?cBIPto<~XN zq4{zTFiWnT4sPG^WngP(4)6PzuGOCWT(j_a@M6vbn{L1R))$-=jiKWOSCx3f_M!Bg zK1A^GO)C>@(>m3U@6ZqR`7Zi_|6=Rql=FOi)4)|(=N}1v;<}y*Oxv;;IK^+J&E-iB zEbaPcz1>^Vxtn^-baQAUd1K5aJs8V6OMubf``A9sBzNLl*v)wksD^=eLR)3KBKZX%{_jej05-zLJ&?^}SZ`b@jd|9f&5xVCwF z#^$|!@278ihiy6jU4-^+8nF0hGkz94sm|Y7-vn*pL)%)SK7rq6>>So+p6nC-@gv(l zXTkLvaP7n{=^j^)-)H&bM8grm1Fb~Iw{@T4Ar>|)zq5{@HDX)OMD3`K-(=M>lvT%o zt%Eg_Wr`_3G#XQm%&8Be!+pQ`8_W%TT_d@XK!*MdT_gJmpJqDY`Je3lbEG%i{Ks9$ z={@VP313-Slq_QI96$TfBZ-Z18`D`?QDo=Q4|smfL)g`aO7Qtd{`CICnt$#GCszPl z0^W4tSHbfk`WgQS`{jz<0?DN4R5SIK566yZ?0*@V0v`9y$SMQ=>gQ6XfOr>Z(si3t zvgOy9yQf%Uv*OOY5L>zFM$qlk;!q+XJGyGocyaY`Dyd;t@utv z6J*FKzAM4Gd3+;_vxqr|aEGpn_li@u&WQ+5qCVOqMs?%AAb-wXS1IkRpdR!~bT4}e zw620e*b9t||FN;iIl|i?<7dz#is5JhzI}nv5HX02*?n4+fpOQHZAU(I0T@%9Nk1M& zgHQKXV0@W5zSe4_(|ox847lVE0;avby!s{m5Iu?Au=clXj`P<8J$nfH?%e?Fz7F|- z?T5}28Jm;CQ^g%6rfc7Heiti-yuJQqXqLk~k61q9>P_cv#^x^O6Musp1zn?i$k&0N z^C`aHG9bOluZ4KesN$w&|KYc)gqI88ZN*SWD+(T24o^pBmKWFIUqVMlr(I&UgVd@U zfvJ?Sp?)d-7hJ_>=fq3c^X$AnF~^S%(w5(jVmQ?<>lM{%Q|+nUO#DQ&lD4D$tVI?N zFS@JL%;2o?wH3_WoUF>jTB}TLNYIHB)1)9|Rwt3W5CTX&n>imcFwD+*4$X| zw&hQ`GtBuSz3~1((KU--;9Tx}(?RSG>ofdjUSwu;!zK2 z^NhW(KyUTtLkHI4%R}DT@;Wm)B-vlansxy(hSY^3ty;}hebySb&egh?GY`a%P0!Wd zb2_+fmTZMx^i#Mf=lr!k?#nrU%|UN5S6_@LqN;3z%K&x!(pq zYmMv-c-sAJ)t1$P(v|8bhZ$IVs-1IKOA+hXzUvA{*NUEtp^r#>H=7qHXjqk!S8CCYHFlK=fz$qr;*J{V?D~{Rnft7u+#ux)yWQcn;B* zJ!ZzDo$yx^{dD0K_0NU3te+7++MGY5IG$&^YMd#pjN!(bjb)avYn&X5qZ-GxXQFT( zR_kuw%V)qiE#1tum}?7H#Rb{V`so!Yi(ud{wDi} zx|z4wu}~3fZ3hNhrkVZxlqq#$i;zK2xKmef=6WO=b*AO7HA@z&O~utUI@c_UPm6V} zRo|Vw`7N}I?&zv>rpy=Kqnxd@cCuh;TEu=Z^30geUND~Hwp_@WWXo*l+uo`t;JfPXglVUDuVlR+iXPY0P3 z%;8%c$RQK@^gTV`ITM!?{Q&ba$s~iFGd9LcqD8bP*%CtrV6)&K8`EdUF8UGQ1$Fo? 
z=-BziC!nX@YsH?m*__w4*!rD%Iyi$a%fCH--+*^stmd2}ZNZfE$w*sHN-}R`F7p}c z99iS=?V$jEfnbz4zGym_%Xc30qCxyYiX*E0(aw4|bLPL~*uZ1n9v;O6WygfiCZBBR zEpBa{{4h66f>ncRn zMslUI3)eZwv(t`Qw_1wUW<)VZo1S?KQ^deuV0^Z?dCwc&VOLW$gPD56i9?OS^Qw zbix_lw+HlHi|L(>*uH{;_1z~Dnf^Z$F!P0j;Weh($Ae^(-fNHE5@30NGAk{f*4g<~ zC42B{Si4-{xQkTxv+y2cI=UA=*Yh6a5A(O^=V<=~e5f?$qScl!?{_R;9$zN^QAxC) zXP-Ym$rwwJS9Rf&V$8Tv&f%TzvS;-LU`{c&D*%psXcgJGZdmXQF6*vd2|OLVYXPS5 zuxgLtX7&k2Y8zHWbuJ4sw*bD3JNYXTXH0c@Ftm0PyuRL&UeF8FAXt?gVq zKMp>6u^&U$R>aQ2j@y=-;l~WUL{IUv&!ghco_kEOc-!Yw@u<(AlDp!~@w~%^2}`ec zg$es#FbT(ZLtkvC=@;sU=o&8b=QzC=T>qF@4taKz{I+#`>+}HhSOSkshaNLEN4WP( zc8(x_TjiXBM?S9Ll+%w?FjDI@@+z5y7Rvo2yBVa2l)xlASbnU zy_)YErYKv}LOdepZgmi&RI?C!2A*f0SbYZH%eCTpXPVuY;^O!9+gZ*SsY4cs|B+FL z%h(I2GKy7IIg967Z|usJ)cT5asHZz~m*)jO$1pK#( z{xxy0u_-%7`IR?1Xt0xs0VR)MYYH^J*$n)3@?E^>4ID20H)6sT-=K*ug;yTv4g-7AH4p}T%RHg}ATl)M$>s|j|dTac7^e)Vz zH+Bc`OAcL--gzFqeOim|_c5nRalVG=-N#;%4t(QT^xi1CT~EKp=snhs=$**WTXfIR zdt){EFGTO9QT8AEi0jo1tH10L`ReZzxz>Jmp z_UX&WfS0g?^ldI=LVXzf^(D?k=mYNYdf~6pug@dr1xL9lS!eDpZ#W1pJCLU>=(?!m z<}(iJ`If>g;uoVW=(%@WM?1$agg^SNa`}b*d9#T=#T;I?`WfcS`1S4DM+z92Uvxdm zH3Clyr&aXvDZXRY3%%W&Z(zT#yKXl6F%Az%Pxv}R`a<;g z*yK^*F8yLA|5vl>sOFw|koD`mRnGUl@VAe}Tx9l(G)%O<>8;sgt4I66<5BQ+3unpq z)2Bbkv~z1#I|P!Kez9TGs- zD?b-!*JbLzk*jcqGqd!T%S_eW%iP5ktZ&t!lbO359(gQ~`V8Z)jc;#vqqo`$ynPyF z*wqU%b$lpOhv@oxfPE*_^%}U8|5d^J{bd%oC{b_uzS8z>a_+-ago76gw>_^4J%BBnhv+dV1e4Ws(2l>=O-pHo0yw;|{?!FKD z_}}2t{MXi*<%iz|UHOK6`VYYW;A2ZGt8(1}&6#DZ+d8q4{Pt~~>+PB^br`~c;FW>$fM1Ph#p#t?sd}{HEpZI~$@S@G|v)~Pbo8ye9@4QocHs&dU@Ht_PY>}M2^0=7wi}~-`*#&~f^_gIY{FXVBzA%C zVipczb2hsAHZVGR6Zb2idjcMmEmg-g#P=!IvUU+C{}4I`U)<3B_<SOFRZISid^9 zR2{Zd+?j6mWEFZ+XEA6VC3v4=uJTi4@OYeZj*W$99k})7;(yKHxdWcic(U?~y2|%B ztYssf~q6sppQ{zrDO1^K#k;;~`ch1UP*1AUPpUge)kyU?pK^xE0mgkNhL;fmqk8nePy1 z`;r8QA5X>k2=a+reNvX`*QHtYz+0szXl3Jr(kF^(zYm%VzFw^+>p63}w6n8T>%W$+brs8WzRcpCtc8n5>zt+fF2bTPd6!dOax%vK zN4du@J=6yuir!0kj%)m9=93z0hVwgYTGsfLZ$bIr1kVY^*F^K<<(jL*$4}Y$bV>BF zOy4vf=p%l<^YWsTc-;He&eqY1aY^E$m-Yn`S9owRweVgl2z%2Z$ zKG_>@0iW>jHrMfXkk6;md2z;u>>s|%)X+=c)&t|uX@3UrUd{bO^qF53^Ol>nch+SM zLuW-R(E z?8-gZms;1pjXc8P&Zk+24V2G+qc|~3_xh#|IB4AYKRh{iC1t88qx{&Jor3Ky;L%z@ zGjW@Io1!zZH0XVI;RN`+`OFjT*h{s zdh3v@vdO+dn_FmG=h!0i(jD@#G<)am^}+M|0^owYw_)4w1$M#rYu17ilq+|N?0E#x zXuI(EPso$oz?0f0e{w^nKQ4VyJu0vEZ^0(kUflf93$s@4F1z?Yt6!`E&5I#l_`dSG z0N0~|{B^STbzkkM4KB>^xG59)AJ3yX)2K6o+tXfzF|nyxi^ibQEvr zKpV+T=5KrTu9SB!GNB3HlWx6~^D+$gszdn`d~-}?3}w2BBa}|N2b?BIbDj~4a zj}!|(c22_B9K_R29mu{A^25%Pe@u)s@znm=3k_#S_D)pSAN8?T%HM?RsPHcEoGj=m zPZCp8f8$qTM-r#LawOEU?wN8ETBk8hyv)>9drhmpFYexmyu03$8@s@jY!&xp;AVZ3 zJ)h?TjNMprv;CIXX6lJ^rcAgc{*2^rc6_RI*)`yWF|t>Ea{FEOrc>90cKsz<;@Efw zdq&Xvvhy9v~avd1|kZG1kLx4e8RuZ$kgs8J+z7tp41^z4Wc{E7}dw??L)~0>96V==L^j zp?=^^vJ0Fz+*CNR`xWZcc%$GDfQ9kb6onm`ZcD4y<>&L*BEdio{S55f0Z^sL(xwfJP6 zeQh)Rj4Y|r+9LA?&g@xxcho7%7w?Jx*FqcNO3z`g?}qO) zf7yJ03^->a19~M(PFO?k3nXXan~-eM{CXH#YTwcxN#y_R{wn6JQ#YAF+)Xmxcm?J_6GV`?K!(YlsuLx6URShh!wTz z{bT8?NsqyQwjSV8J&fJ@@okBw@`oIO{{|+PZVBOcWq)w5U@qX@3DVeK(A#h})O#Sa zw;(}Z;v-tK^v+|KKm=988|Ki4!=&t7K<(|&_#QOw3kTvv5B_2(5S#Ecy4Z{ocK;M;6grs z%ZE13-?Qn3!@!d@qUG~WPmR89-nonz%XZcgZu5Apa{(~(-tK|+_wy*OU>EIc4}tKx zi@nE|7c=`R%EQ4IuE?B6l+E|Hy^3qS_G+(9sx4ry7e9*+P7BZD+Va~7lka!)yVozx zWsm_{^Lak7>00(h+BI~oKf_DcCSON3EDNKPs8ip|=6pQG8?Yx~Qs>pcLZE0T!DU&n z%(Q1Z4SXn;{#Rnxi2>+*nf3pd@CzKrw!s#|o+v|amohhl*1?(=C1*93^*n@4Vf(c^ z&#rxEm-WkuKQ!-BAOB7C>(AiR`LIVn`qf0cZ{u50ztN9b{W$jwCfPd{F2{pQFl5tl zLVl_Jut&e<4-2mrkL~KG=gZN#FEqkX-tQxN1vX{c z+dbCaF53IMe@lB6@7CVA_NQ&L-)l?jTzYn#k0iw}-L(4`w5wQ_h3E8h^vo~pwi0`u zm*z9JueDy=ek_jP-|>25Y@e^k!-`Y0u+)C!T>fqLV2PoZ@6W(;%LK4sN1_ise%uA_ 
zN8#!B^o;**6rS_WXPC&(J;C_u9|xaPGdjN7fpPr!`UEilXOAy*;dYDb9uL?5vGFwl zKh)M(8Yq6y@14^K zyrlHvLUi;7^nVlQc61|at$xSu>R=wEaoToY`LKU(hw7dP&M`jq){5lE+{apx!S4Sp zXQ`~`c`to=n%`6S`g#L81J+$H-{Qlc`p>e#fbZA9 zC*PTTli9J}V{*=4|2oG{n*Llr=j~Hx96hXgS0ypJ_#eIR?E-g){PVe{SaGDyz+00$ z>&{xPp};2>DQ=!J)q`5oqr9~@{jz*d2xF*9>G)uE)|~A#v}sg(b@IzH?^@0rN9+5F z>4z7JLp--r<_6#z?`IvOj~{oJ=?m*pVPXfr4h}k?O$mDo#d8s-z3YpU-7QbC2H3`$ z@-FBkcsig>Cs*Lx6Y}AuuXXo;PkzysV}7#fE|HrmvfCV@vHkBOb=Rzu#5pL(!9hOw(s*~Gr{LVumP|9HOggb9bXUY= zv|h>c&#=c_0uJyM^#2mwCGV<{?<oM}LHel;p&I z)jnV-u4ULhC&9lInsxFoha@-F+wo-DYinJL{UoraSw0?DGk3`kmZ|w<{6l#b`iQ3F zK+UFKY`}h*;a?b^6S6n#!cSfNDtrH?T$uJu%@O%Lss5b&QN$nBwNJ$6PzMhc|ICx_ z&_iq(eon#q*1RSv;Efi-(ed`#Su$AI5Ib228R0 zLcIrpX-0nXR$%M%V;@^Q$~ZxvD?&%5gTP1os2!(pKi5;xhU^^BL`9`(`=JN8Y7dad zwSYa6))C1^&GZM|>WK}M^~9g6oM>(G)9zl!bP(}v-a zYme|u_xDlWgWscIQeVCBuqxKEf;IQ_=1#=t8(!Zu79AA*>|$RgTD_1gok*{pz{L5q zn;F}3!S@39b)mnH-l#?b34yPFNt#?kv|BgOzpeeC{7x7i>A8U3)3G6Tjy79Z+jiv{ zM&DHGkpCPwYyUZZ{#uL9mq#4^jP1v)ec36mvFl}iVf=N%;Q0yeYw@QlgFY0qtNAN^ zWjAYsAD@^4%lA9ET6!CB{GxlwCGsiOeoOLp^!7={X040hswVI8I#hr70RA(2HgsAm zfHllK3sHX_@Jg2x&2k8c-f{y5)sLwz?NuxgHD)bPO zALBdUId{!Lc&y<5VQ%1k!K<;i_txmhJNmbLCjn0Z@D)22w@aXZNsJHBnx2TSmB6qH zSuTvv2e$QH%rk5|Myu9G<$sjV$(z`d(5Bk{C&s8#Iz;)4lH1qmk8r3l z4r#6?*lm3=^jYusU$7rn(T^{xA8}o0HunMUU!X37Kbz0k@~M;8-^rJkrZ`FDE6Jw* zPjcRg)>6rh2{J+@GSronJ6~R8?YMw{zz5`42~lqb^;Rx`u8{?QOCD3*H$#V>U(M^o zKHmdx(Y$<2c}Mhr9b@DfPfP;y;Nz69T|lfj?Wg{Q_J{ZDHQEm&C#YZ2o8@y2@cU=v zWc8AN-XXtC*K_3I4O~+S zh+|lf+zD(w0clQnp>b6 z@H@-e+hD``2zda!4dEy7ZpB_awr%5FD|RQ3Id~0gxL`wfybC$)@%_jT$bP6C(9u#3 zzV*=cl&Qp5)?7obVBT%x3=Vyx_b;(WpiKL9l!1@6cd(}GEDf!*vw60Uc@eLt{aXGP zFV{H%x_$zD>6>Ty#)389j)#%FB-w1$?Gbd<=0!<;oT9KXQ-SY5nd;w>LAW?HL6 zld>rU*mJ2*Dx*G)1D0*b>Q0}k?Q=ziZxcFK@(dI^1%zParJ<22G3X?z@eqDMBX{M^VL)Z5HB85>*{{VjnHWQ1ed34eyf+ff;?L$*eaQ0$Q$ez2W`db*NF8UoY(i4>iaG6_9NCeqJOY01IsQU zn7hj4S7eY&*C^hL%qrPQIgwrT;BWN4d$fPowx-+z|AkzBm66tuEE%I8<zl#aB3_o|EYFd~X`Sjqf^7Oec3S{H~5v*dvS{B8=|b{*aq@RQ~YA(!el zJKK>JYw)wGQ2k!h+&G>0p=k#iy?<*gn!eviTgH_(neVi$~{oD9J-3=^Z`9MvS4^*l}Z}*?} z6^ZU^$!jN?Y@)rk<3?J5Hp9qfUhq?8+HpG}&hGK>EW~+4g8c=`NLOUiAC1FLUhLnp zV(!d_82hE4fotihg4L52Ri{7F=w&=qk8poC`*+a*G0=xRZqK0-{AB#w!;QLz_^$c! 
za4vd~;>Fv?o!H7ZJ@||f7gyn3Th|^;J=iFpuH+W)Ru8aw!P5rH)S_$F{8=0^PpIsM zF?QL+a%vy{g^0>t?_Kb|%FQQNqv{-~Trgh0zN-Zu@r?_~?6ytp!ZytT{~L6OjvRZ+ zJ<1~IYSXjlWzwyOD z>+7tq$9TVv_dfhyddQ2Su{r)~Q)9C$8GnqFLqRj8VK3{EK|Y%8t__Vl@h@`bQ8Kjr z=JbF)_oxS3A@Rc5evPZ`-;-hGx0d`mJnr1!Z-`%_mAAv$M-z_$9H9eEx!!5aE&Aho zrm#oriTo7dAO6$sQ5FvgGY+3(jdJ*f$$bERVfkWJ&m%|MTynLQa{iPp=X7EJl#KbK zKlE>z4Sh3zTGe;ugz=l1=*5^<)ER`{b;eK#p5f$5)48@TaNWi80N010S$*Hbn*U4D zEcJ+vl=q^6`K7v2sVg*!x{y^v&_Y3cZu_Z!*(F9%n=8=n-9MwjH`;i72Dskn$^hpR z8ko0_O?liMG2P)t&5xT;lUtA+HRQ30+zU+2$iU(u&DP$5c*6bY1#J66YbtpIzrvYd zZ@W{=?z;&78!>CDfy2Z3Kh0|kwtCN1c7&3N;aWQ?x1d=uU=DL)(KDGR0VLywl45=u9UOYnsk@ADY=K>|}f_jj@u zo1CBZDex8B8Q-_Er%&9sYJO~jmyG1QHTG3IAA#>1Jqy2l*IjoF%m>RK&7m3FFQyB+ zkX`XK^uGmKna(&z$*V8{K65Yni6FQk)~?!A{}RU@-ha^&kbK2o_nu=+x`UfL1PT?tM?7l5r2J+WPXM)XzcUWZQA;n&xxt&bQ>&A-99=lGis*K6B* z{TTDwfPcwfhk0$~wiEyAW}Y5HRwyCwoA{VwuYYIF|Iz-#I+sj*Z0R5$LucaG9F)F7 z^LYmI`3Ss=WKIj)@GINzvggurVx%e=nc7U|%wnx8!}P({xkol#OBQ~czH2>}o~8#po_1fP zGk|tK0zY^3j1}BV&j?@7tiY#|GeveK@!OkZ)@%c&=1g;;YzUHZgZwWV`?Pz)oS&j6 z3diMP`H_X`_Y&R(TQ%p35I5d~QX@aq`fV(f3N?fx_o%J52EM zI~*t;t<3824rZE3s00 zCzv(TEyj=w-gJ-gpFs|np7EG+8^?JZ^Bo;&YV;j!W5n8I?%izG>?5{q@ApkBkI%rj z?h(EnZGZbVb4hyHZSalQdYexuKjcx`vGg0X*~UDNO@&7wlUTA2J>KgfgU zyUeVVF#Mq&{!mSf3N&u*6Edj(4f+9{mrG}5&t+R5znJ!g;{jMdqMr|9ueW8qp*DGE zzIQ?oSqb#;BJ&IyOBbCQ6KwmaGv^Kd3eY=>UloowTIaLQH(soK0O*sDFA8;K?)zC& zm`m0f&CQlx#j=GzR`~cla#9C=QDP?dIOk{#-5=VqJ0<_wzXkcFhxuXA({=IxHAf_m zpr_wbgg+vosnG^2HyV{iR_T{%7^%eKP{WNR(^Jo-V~*6L5x4~MOJEgSK= z_S^f|zq3~c&+X;lUhf2x2YX6rxe)sv5qtBh@FK$@V(vV zM7>tRzF+$&&3{j#eR!JJvJVT7`Rs26bIRv0U>~Mk*@zSIsy%?t+dk(uB84yLJluht zu&hb=VNd0pT<}L9hyhQkX#>A8|XfG1-t1~jdScxv zo=3kQ|$d4<5w)(KtIo<`(%ICDn0~XTJt6BAg}Z6DDdR-O!DuSm3UU_AMoN2ZQx#u)Ft|b?qoTs>BPxSPsq1Q5__+OD9 z^i3yqi*MWU4f4nKMA=tu)*9FaKhpn-n{4A5GKzmCIicG)+bl$U$;q4NwzRMpE8uL0 zOn7Ml=TG%8U%i|`RcmJTyuE%y<3!*Y!|!M4$CrI0(0idhBR&QV-=>a<{F-!b1Ubs4 z@Ld;q5hVA2{}|^J0uTOV*7($^M)pf{2JM-N+hho@jj?u?_+E?zLUEnrAAGJrgw|ZvpX6*HiKl%?JM;@5f(ys( zjwCy7?r`+v!}kfc-dQ+mv-T?4=nx0b{vO)@Z0o$gn0qF1)%X>%emOGpNOE_Kv;8K3 zuTFG^@UHgny=VKro}z!YEHsgQ*#t|6_;>0T=Wdzq%tOR6AF+3U@dl&|8wSN2%s>#7|_i(=YXdk$qnZ(ifwhnmP6 z%l{P*lOtC93>Wn%uXunuH9kKQEX?(tTsOg&^lY2;EW`h6&CM~H4V6>KmpvxCLGQ?) 
z*!O+4%YJnh_O+1qt@*UgdM2L`*+51bldC(lXL;IMZ%)V0VaF+=PixP{Lt1-M4okl0 zjCAe0qfbmUZjoH%fi^wAG8Uo(TvWoIj4`UHx~#47ATfOQ+(XU;Cq9-T%?B^#OSWk!G1O+E7heOen>TWXv!=*$Q=2Br5mM_c~%N7uV7o4o8yz2IB= zBIz)OdXgSOi)3$ze3cW4eYW%Zeygwn(J*K#)f45P1qr&)Xq-HLS1=k@vp}(WjBI)%VVf?=^~Ue64XEQWCjt z*pKOd?O!~eeZ?_wtv(-xcgkjWn(x)$8LZ*4olZ`|tmuD1TNeKEZTxkCKh1IBZ`;}a zSKg#6g-o6z;z zd{6UBGL7_F@FgqXHGP|zD9aQ9Us$pX>$!Yr!P}5>=S~djNCW6ct#8SDJ)Q63d9RD* zhw8`xBg-n;{v2|fC{IBl^#cR+IhYgh0Bs!OXW-+KrFwxw{IKnV{t7#9wa4$&KUBuZ z_M6I?63<_ajPasmjN>E77-x`KojrG=9Q*n+9c_L&yWDu@HyktaF9wEQJS zufyTqY{6W|*<4mUOZ&-1E{j*3T;y)i843MU&EkogC;tmR(FYCupZEmxwG-IyXI^(g zKlCv&lfEX($%FpTw%$Ry2I7CBPeMa^jKy5?LqQt@zB8pu&||VE%rYlOw(&d<`GtMm zt|{Q@zo@$dU4#c*@PE5xzyQy_0*>^YJ#lLR_lfo0@TH{4qpT+l)bkegY&4T1;c5Gu zL(}S;o7iXKgMmI6df<8O+WO{NXktIlZlNC4m#O+JJu1EqCVZ5=8#2s!XWuww@+cFw zk1u z<=Zya%7f>7X>83ttu@+D@1Gp;{cbEgE8OT+-()kcpNt<>{JwNDsh;BZ;8`|dd!Id^ zul8%7B)+^3ommUtmpn4MRe7lP(?$V!FW~?AnhW)#t$i^$`>p(?L*6a1`J4Ef_@LGt z{E4q&JO}EKyqCcq?Lzum@hItQ2kg%Y@pIxmo9>i+lo1tuh#vm+Amau8Q}7)MK}$_@ zjozkov#*f%O$UtLWc1oi-jhmjGg_mNLAC_c13&ga3YaKTwL zGd;lCy%X8pKG(q73-5&PQs95+vx<9m8pY&rjf7ca-lCj;RG?UVUVG*p;ICnRfn~E) z9`!1|E4VUH6#KA$)|Y5sy13VOj)}PG$5Fn`=YPprDddv7FET5QO33mU$PTsNP#3TP`xThgJ1MiC!Rxq^gpc9}`nR+;WJ zmAi`X)b?1~4!OoOjQvuQ?5b(nAF2JH;b+J=M?PqS$HD6zU4X&gAfL1MkfQga<6JGX z(Xm)QweUT3mYYS-)8Lnii`Kr*!@S3jx)qtadI{qS{*~V()A(dv`)kN?@>4j8EPWdO zrEgrHgwFY<5t@@7bFa(5E}rRsD85fbZ+J~TeJpr)aNo+lDP419FVCF4Pa-e52|Zbe zeV*!17GD~H$AMqv5bra1zrosP5AwGb>eak&1{RgkzBmhixNhz>7v&4$Z<*IBJ=lKM zTj@ka`#MLhneSTo=^VA;eqlApPdB~aFCS0a2a&qw_nCe2$#nE9iE;?>VbKlId96;H ztnbUVfz$8;_$|DD)29M`$``ba_I`*Tf&OoX{}OM|vkhMQ1UM$Ap~c@r9}fB>#=wtN zCp<-EE2&fXtft>j@cpJwp@-u;_(@vThl6n>YUeT9>7Z?u7Y|AFy(+{>FE$a|5g?8|D85)~^Tj&O zq7h%L^Pnq;msz5DdJ+0Xcz10s*N+)%Sbth;;o-`CFMKuu%hwoBt1gGf00 zFDd+*7c47+FDz7E-R1>;trKb0tP$eD1@PcP@F7}gUQkt3g8#I~yo7RvMN8q+mGIv* z;8vMR`sVBhb%p@(Ksmyx=aEmhdeGAuj493;nfF?K$QkH^n?6wP#nEOy_S?Glf26Kw zx&8;^-+b*wTYFMH<8le>dW2N8j(Z18|$uXe<2-M z{A0HYPvYM#YeK6xOfyysPZ9d5@k<2{^syY;iv0LD=xWHjH@N?>eIlB_`F(3G4FhlZ z8{bFv^OX6f*=M$QS0IN>Ys(F>Z}&B`4`U8X21o;Eo{mQAyKQ_|@bXsczKZ+MEB3wm z=83Pc$SKHD0ta9IoTQ6$GxZ%Z5uf zCkLL+1bF5rz{8$p0G@ro;J_nZlfQa`72~76Nna4l_?CQ%YW|q<&BiMBTAzl$^dqaj zi9XGOyK4~c0e=L`KgZs)S!GPmz&Tx7V{XKMtC>Ao4{Hjzp4pA9%7ZUfi24hz+_zeG z<^q#EDCDp8GoIC>k{i&=uW5tcLuS??FTPK6c&8jX&OXZ7r(v72^tBr6pTldi*-MeQ z0G^l~ox)z--a~SBAN*JLFz0*u{M$Ks2EYFS--DBxT4OYKHW$Kqhc#_dgYctU+(CA#Sr|vu^pUO#yTQ7STOy>Ub~|!w-|k!iM#k7 zW&V>fXl`Wv1Sji{puTFTTX`|-SiGlk>x6SCQPchedMA`tMMTacxGE%&l}R*i2EnZG_w!YQ9rVF`*iTBIkn2xnQLE~DSd1`??sC# zoU2iZuG`XI<4>jewAgLbEXKE|%e@hws;p=_cr}o@o}{jz?MGqdu++QH0Gs4z-7ip| znOE8yJW5%IHXWI7F7N3Zd#DS?;yHRfbYI$2w+olQd%@9n6W4C}4cNRxdmQAM-6~_r zGYNSMLZShlA7?&>H3z>F=w%&@v{^X^p-Jvvr|-gX1#v%Blqa@m*Tb@9nDY7X4)OW8 zk-gY9#$Pynh41z)o6)OwQ|Z&bWh;8~`1Vff*iM-zd*~;0SOv|sU?WjEx83)sasZ9g z$0qvN#&?GPD5J}u9B=oRe!QDmKUpz_<=^8zdsJOJxs^MNh1pxBD>P5lm)F31@-$!D z=Uega$SO5n*3g!AZ|hFhmORQSejJ!|*3f2MhvQD|cxXrZt7O{9BPMhKYta1tu1JBE zlg9`95qyc~EZHzV%(or1-2n^^yg|Or%6CU{s0-h)g)PLxR+i^%6(4M)EbuKn2z)c( z>w-`4jKmi&2v^roxE$Zlm^(eHGpfAAjQG>gYY=nSLvC^D6G|Bm_6Opd<;14e1mF>N z+lhWE=bwje5>IM)kT0PD(ccQ_PjfO4oO%N5+E=-;FnXe>VOfs8Joid;u4-b zU>p{Ya>i*iM>6qa z*^F5mW2L!mFjl*#nB{&wFYUi`GH=K6tb`tSs>)PN!{=LLmFQx7ug1Z9n;z3SW z%zYmB+P@BmS-hl^pZvyzDImMQ>Rm3_Po?c+kGo~fqB;$#^E8# z@4IhC?>@$6Gj*$u8lH*w$j%(&TYSOOy30pvjzknYr*lR`?-q@XkXJU3vop%TNmare z7@+@ze1F<&E57n%QtuqAu6Q2wJCW;^2VHxp+0>s!-r5{;dbhF0On|1kr%c%D&kFPr zbNx6v-ipX;*nEgJHIXA{FqdkHwcZAevhS$Tex!1Qm}~m~m}iMCXGrI7%`fT~jjR5d zS%#Hc^ZEq*&*2`Mf+as#^KQToc<}u4zX~r2b8skL?D5wa97o2+Hl5CdX%!FKON^Mt 
zvzGJ7@V%_aWE>y<{fge%7_$Cuq)ZSzYRu*@9NoI=uG~e?q<=GIn!#x@ zxb*Vvbk6b7ejwC^otx_^+EclaXiGHzDs%M|b8;8<-zHa%mG2^al$c=jM9OWcb=blw zW3KpW;ZyIC0cH=pquAhWSI&NQb*DzWi| z`2k-~fO*_c-IeJ)@z_6eTxbDp*9*<7CSbDq7RXXviN{G4Z!V-y!G zU6=Fh8J-D`L*p8e8RE7(3tz~~;Oj99UoO7q=Y0Q!>b!_&{G4Y$*Ry1v@pGQF=-E{~ zc@{c|6A8+pzo_(t$NmtipbB!#AaFY5Y*(!N`Dy`!}6X08@~67st^a@B3vkmOI~39<)d?VK#z9Xe=(JyD__ z`c7LmFPF|?KEBrI7XJzOJ8+T%LmhKec+5|WbZHzb1qdx`{FLfjPsP_*QOXc{|D|Qe2Pc;Pr5JizlSgW zO62k&_Q_1_Jos(a6e1sp|K^kD$I%hk{^-i{pXpyuTqt{P|2=#sdE`3!@-n=<8GjRR zDSJ3}?8w)DG49;;9reEk&J|Z5g3co=z(e?v zN34C)DC9uVKKqzm%};MF523eSf$mKD1JT?`>fJ$GCT&aRx}5&ne!3axBjh)q6V12Y zW%_Rc)=F?2M3?HRx8(rzvG^2E!UsU{eG@HRBy{ap{p!z2+6}oD?`ffJ{GY3r0CVMB zCG=HaPYh9_&Hh=7XG$JS z4#aaCA;Tv6nI?ZN(DxkekCdYzdG$xUTlo?1>Ms0Fyd|}3TqL>E)(r;D#l7Sp?=cv& z8=&3ifn`J7?zths?isAV!_ouXP`|kMLS+`zOAlo%tVSnkJQ(P^5&T|Yy^eA&XkS&F z*LheQL_?j(KcVHa0U(c)n+Q6XRe7J4KV+8&+cdDxTF#hM^6g#7HI?@aeODcS_o&`c z&zIx>e~D{oot?W&^+KPsLf>Sr_PGz=p?4cKZ(WDKsQZ7k?{Cq!d~=ho4+Z+JWuGYA z<$$|;c91s?zV$M+6LtxIc1&X(ym}w_b71{6-|pktC%J~#-O<~|yE<|m)y2yM>CY#4 z_sSH`WLanBJX-iYa3>ls;<@ZO$zK~4iQ9?zc7vLCTj~@w+&e8C} z!8x(;Bj&`=cSGmI(04=U#L#y`=fseA^z*Ns6AS-}IWeTn@Hz1dWZ2&sf8oUiyUo@3hVXiBZ0!@l<>fQUidjh(0_P&mcu~KJ3 zYuty(WoER4IVOIcX-M~((IcLxZ>6ixFcdeD5$#~?R}xzyIbZt> z5`Bu^U&MR)1tJr+C(0DV^*8FL?1J+DyAj%pS-I_BCbxZd&jDcg?-=dG_r_2QNIMF-BH0PIMCcqdsZe^xVhuJnFT_@qGP{(?|Ks2H1z_EXSLv{~mb3 z8Tu0YL}}lj*mFsru#a4r^82UWnOgHCN9_lvx>mvurAO&xj*~Cza0uSv0cWeINAj0l zkFA#gSLMoUN*#wo@Du4o>ZwQ1g)hN752G5ITzgDx4@$*d})>hY#+vj8YDr_Bw?-u)i;Az)BTX^#9 zzdT~(6LX&U{}Sa}vfmqKf#oNpvIb>Ee~vtA+3M#F=5Pto&nkYh<73BUEwOca1MQeb zi(#5As>2?ap?=i%`FFPe5#z-6wXS^<_08b-#5~TbAqMXX=tDHxgg%D#prR=qzf4_o z=`+_$fxGD{^on}+3H#jA@J+@-=^C4Ddx`%^U^~nB=Tr6s`bg>NXTk5lOS=4wE_Ci( z3v(bSJ~V$GdQDG*^zGOuh_}tG-p1T7ER|fEZ0YHw13!oy{u_9qqmN(5y<+y}fNx6= zL!Z!5wy%KJD(Pb#<9>!Vz}I6FP4r{3Eg%D?KvU1TJkQ<4*b^^$qLTUj3VU4jGe$o> zQ|ik-(_YFAqaWGBK5Y+rioN(v3t#)e)o1DBBx16jMxRl}eK-vrAhB82d%jnnyq}q8 z`9UP7c^fQ!+roK$LGGjIXxK0G?4y6^$odx0N1fdxxMKK=JFwpi?1D`^9X!g$m!tZn zGrNj5HQ&U4Xv>;!;zLW|?FM{T{78Ob!{0L({d%W<67w+7PtUp0^waL&_CFIBVD*oD z8no%avj}*E(`mmK9ojl_@zqNw1m2GUc{H-$`XBbPXL-grtQZ1hmcA0V(N`JY z)9nYw5N9*=KT4d_dgAHEp?4dLp7C4!-(VECq}G);6EEMrh5t7h#T)gGXUD&bo{R6g zn~Y+`l*G0g#jI29_}~6*<73fgHSzc4Lfr0X)-d z*=ej;m6`LgeRX3$;JnL2bFQ)XPxrE~?qRQ2LH$+mL~QQ6MzZY$vF%i0AMy|ftr(W> z5BqoDNNi9idt3FlgE6V2T@OCjnrk88@?31kJL~Gh&S>exp;47N4vp#Ac4$603g2ve z9)e!{wej1RJ~x=kz1~BUWyCLs=!b0Fs@JjM$-d3m$PLL^?45WoTjn-s?m{$Qn{dC1s|N|3h(}e-A$EXnSaU##@orQz*^GJAQvjgTC#Gg)Zdk=YleHD8n z(T;rJR`+sNQbj32ULpl`gM;X9{Tyh+TF3;C;$Was9&?UtLQV@@2S^#X0x``wMRK?CN5{} z7N48KSk6lrPxbRP{zo5>?q^)3!%{4j)|nQ@EEk>;a%CPe&Flu9*{Jbd3(oUK8MC4v zHFnIW+^U&HGl8vzG3>?`+Vq~UNbT*VJ?W5aoKB1$LEdI=&lIlr#9{FOi}Xt9K&-Lw zFeZ=N@33Rrz7{{@fAMFn@qHuYF&SwLu7B3q7mVa*@h?`qsC>-iJMl(FQlZA=k5|Xz zVQR+_8)6b0GQQ<~0hC^5_j=u4xu}nGg4ehsq)&+JU(Nn?qv$TKia2U2@S@Ml@7+03@=AJ zTFXYNLpI2+XWZETCq&;Q??+$^v04^BU6De;^XU=bAzs6(Yu?E4cm)q-4D<^lmP>?Z zSREETJLQisG5QSf)ZuR?znhn_%Uf`*cSYQS>*f*QIu?g((a3NegFa%E8GvhexkR{z zy|dtQ3r|_me->Quoj0okece-UZn1P~oMU=av4-mdeSf@K&yAJ>Y*n7FB+DlBM#lAp zvWJ|SS6Y0Mva*4m-8rxL40TAZb?)^}WyF*3)f`~no@LIiwC*#ar?`(Ty}mf4|7q)8 z{+C@#zG~<0KYYQw#)*F6-M_N+r0j3d0UEoDsRQ|v}?%!h_fiILxr}!+eCi*lF`q$cd?8VD;{=z0(7HNMLem;x#L@WKZ?iShi`-$P| zpUxaxY8J124*an;ofo^cGakDIk4e;TI&-W-v_qKz+8JIhk#>f?v*(x{4=@(mIW?8M zsf-`IAzd;A`*y$h=oVn7KmOQKCnhU1x*EJ_eK~hOIqs=hQtBB78?Hv^J}4`QwuVtl{`&F7=l%x=({Zk?YEUVAP}-g-A@#D~qv>8fiz{fzN!8Dlb+ z{wV&>3(iBFB_HM-`QzVm6^A%KzI(l^7zIRTE%_i+mb`6?yuXXBZZXM~loRN?2Rqmi zbYd>f2(#(|ck0_>;x_5qHFaa93(AgaO`4*1;_q*E-t%4O)Ih7_Z}}to_WDj2d5vV_ 
zu=6a-)>cXm3ekA>7-VJf+WqcGFT7Vac&+uvl)Dwb%`U!mVhuaJ{omek$mv=4IoVMd5^YGzG){a6+(T~`4ILDHRh=xtuQ|=T zzp$YPT3QAjJ-0TnX|Xw`rw3jfHZxO*Rd1EPK=VX(d<8mm>d+h!eNSiKpt|&ZH2!@G z<)ot&z3QCg?kOqEEu(k=|8D^9ZtCsvr)&*D8}cRBm}{&oJS6aV_C&41V{Bf0-7Oi) zJ3Eo}#oORTk&1u)Xxq?8{{1Cxp&i+K(^2#*+E(1ou_Yv1qEK(9XY>Vj9Ky zNk*E7IqZcW5YN;jpSdhEyR3_L&!dTX@mx0+P2^bqHUZYS*tY_O#GABwI14>C+Gs7| zJlT1)C%ASJb21WMyHb=>Qn3VqKJ}lzHza|Jk^IngF0m6Aan5Z--h0}{S?GM6J^0t+ zY&Q6s#&|opI(#0k0&A3mGXYm?u-T@0@U`~hOYh*S8(aui^c_Fj(oNtD-JTt1YRyk@ zw&OnEB6}WXkBGC9QCZc(n}f5ZgE+(XGbnG2gtMo>#h~B5?Mrg*{tK_0J1PG;@I5gB zchH)Jw~^@Vo;Z!gz&rjQ%bHg>o&%N)E*zLvhrqEYBG6Mll8zIr`vRk?D?y1}{NI|@7<@Cs|} z67cQNr|>SEiAKK&{{E8vcgTJh`NGz(I{IeA{{*};ZWr<6-*!zG?MIH<^64X73*s{3 z70~AswqKof9z49YdP>~aDF^uiS>r*S&oU8tXA(TlbN{`)g;OVO^>+n|kGtfvmEj*v ztdaJ)L*5ayQJc`t@_5^l(XF)v`Cs3%{>jdQ+?L_Lj(W83^ibxmc=@j;)KSPi@+^AX z0~gfeeiU7v`z8F(ZCdi2_#633{V!)5GiM;XFNn8S&ef6KQ{(OZ9C+gL--FkIi(K#` z`7d<;eU_i$*&K4i;|o~v2KvMg-Dy_d33SpyaDL;t(Q|^}s@W)*%^I#4R(#R3{omw# zZs)pUx!D+7~32E8-b*t}mD8v1g1(0Gx^EXg-VQSn$Sq>#+_e@0$E~v8`dh zKCeGBj8>gBP+$i7&{0MTy`0NHeeJf{Tg~P)}Yop`ryk%Ur8VKaJHB;2Os6$ z*}wfAIQuUD-^q1g%%Pu+#^1~iv9|iEY+Fb(Wx|{z>S3R^m3!?wZ{!echpZ=AyG}A6_R`NF zo8|Mpa-DT@W{LEko7q?1FPNd7$>82J9>n)1%m3Av$cuGR;Dqv;Nd}#=eN(RdZ1pac zeVirdg0Bg@UvVJske;gz<)7AfD(l+0Dc2P*tNr!OwBupF>|<|J4;|jl|IKlJ_XX~y z^VUA`B=?g0boToVz@caFSnun~l@kYk{iF*VIPg3|pViL*Z3~W{AvfFf;2rQ+&BLBu zn$TV$YnSp#IP+5b(yOgDjMh2UHPC9eAw6oAzm7KSeJZ{hBlr#fZ}@%&?T9`madl|! z{|&ysf%s*3$wR~WzM0U@DaK!IUx4q^=M4X^s7JVmmNR4V^6zqWV11N(`2Is5#rFsG z8G~z}=vX{{9^<5O+Yc@LKA~RKDO$9~Fy9`-V~oL3_>$x6-8sUX{|R-RV6I88s(SZf zL%{!NQ}_Zhj(lUA+50VH9tYX~$(LGvNz_AZ)ZFC{KSS#}VAt~4`4oPd0CyHJ8@7M0 zwPxdgS-p-q68Fy?QO=SKuQfYh=B8^bzMoLvBJQOpa(og5$4GnuUJ$(z{b3NA&e@-5 z**RBU%%0Yjk>Xt&OxcmFwJj?;d##!AJMiK8)7Lx9s(X9qbCypYyswh{elM{X7k}zt ztw46KzEUz7Yr5iRyp)gOYf!*h_8zl$jxXY_Il!2C*M5D|0&p8FonxJI@9;Ov|6tf0 z8S0l}oq4?bcKYY6=@aa}5`M)RLk>sDE!G);@pJT`$LMRs%Ex7r=oHygcs3K?&CCB= zcE{e?`bO)D{E91O1EEf0ma$`GL_fj#HIiWxeF|)R$}e|-HZ6Yu$sMy3U^{VSP`+{e zZ3fQuyJigj`5BfiY8q{vfleOzcN?~6;C&9fjn=bFzide7w>#wA&DKYdb3@n~43oT; z>%srup`B#py{|D>3$*_z9pri7KsvbcG2p~{H;mt%q+jS?T0h|G z%xUTD>Rf@=e**^kW!V~>J_~N`$FvXYMDCEU`gZ7`C*ga=`5T-YV8KfqgEa@>%YV%r zSb*--I?vMWF6?Bk$FQj%TgqC$o_UY1;|cJoIr1L~?QP`VmObtL&q(=HuKqRngsunj zUZNlMAQ#wqFVoT;y1h^?lp%RgEI*V%dJ&wDGmkjW>jXOA3h73=`^iIse`yQX!uoLT zB51M7Wvmfzas28=&Vy*zdlvq9lJRHV6`!=u$C!=pf#Ns5hhOy?*6>DtA5gz!j}W{N znV{VRZbBE?|055yYwjwiJ91Rb6nA4I`X$>p7MTA*FvBB}xhg90&lKD~b5eB+eZY_5 z#C*mqxiqlQV{9!a7WA;EaDo3gGGds1#K@bTd}H>EjaM0@C0C9qV@;U-@@9S+Mn;3m z-}piQ?o-%56&qsNwAkBuDJy#svhNz55uxX|F@C{#oaMk;GF3jdiFx|%oLJ27_Er2I z=J(h3dxz`cfqqke`={dl4&cMk$+&3TN}*x&Sr+WNVq4GAx`$tSj>GTkkdqc0%AHi# zeii*#0?t`Ki{+P5QaN!s|5uoCABK|8XDzSie?MhYCfITDB_AKZT=J#xS)f?@bH18x zaJ)ce;ME$tl7f`wr96wB+neq)&FW*6U#9ny>&oBae1U>YBi*}X;O9;goI^A0+(p?LSeN};mv_=POQwApKcADM(1b0?UEY52 zku!Lg&77#i59l81SOrcro*w)oLhxXNa}I!*h?h zN1^{s@;}mV=R{_|GP8xbB-|>7dp0^dVp5BIZ-6tt^#Er9&nz7M8XpVb4U^|W@T2E! 
z72jV5EJ41%(76>$c(3*A^`{19jYPjb2cNPLdyvnAlZ}~|a~;if47PwC`jUbT)1MS* z#aGE1R}c3-aI9RavfCxr_ZGG^y+>}CRcCzSC9n6n+=A`Nlf`zKwi%f;|b?1tWXz@&mv& zAG#G@+klNdbe~{NrOwyUoxMc+-R?kNblNM8KLds%W58wpOO2m1JyAEfX^X><5{Dtx zOtpN}|HL=*;$!rV_nF4ywSix>{mQ-d2>NF)aJ2~j^_9P9Gd&I7Y3{A>@IJ;*aRbfN z`zSHs|3bNCYsYSVTkq<Zf19E2#tD`4s3OW{ygMp6T!1d;Y}uXxxHkYFYKPhDOioEB5r8qpPPfhP9=a_GoRC zP2x>(K)vnFe2eX&L*EDad8uRi>d+p`j>mTerPCXd&2bIM)blyok`7WZz6Gys101keW0}g7>>JB{PeBbMZcjj3vS=)!aaI#+vhyh^XMb~;lz3yGeu`r zRv)VNEMtD@Ikr11&S3aFgnzc5|4j1Th4sf7Ps*--g>t+4@sZd|BltnK>fDYJ&H>wx zEhI!awQJ2Ko_7p8_aIC=+AD-U96a|R#F+-KQjZTlwv91Xjv_->&SkD-3}i#>;(x2n zESG@-BTxZOhL7njWOzZuAf20@Lz`Eb=kDPYy>j%I4GT#O2j9JXh^7q`p z@_yhU*{5RIn+@~JKS(~evZEWBOSaU-g@O0F-A3^kS7*C$@8SH5Zuhr2&&1Q9KHk7D zOnl~oLLc2cauHDgxvjMWk(0uvczjc@sB_lDdi zc*|+#wAu;sn@&AFW%#PLkFQyZoi;XuyoRRHdJFG7u5IPL4WnCQn?|=@kz&lm9#{Sr z@86zbMp|xNwkOO_vTv!Ag9;p$LZjj5#xLB!FLmvxMVCVt9^kmh99>46?FISIHjaXJ zyQw3@wcys!9%LtE9_*s-X?`z!Fqk#JY=h}7OU4$v7y zi9e6=+;gkV6Fus?nUs9c9F?3r?b+Od{NLm*fbaHDX4$P*?9p1I`bC>PWt>6Bc=vq{ zyBOcS&D>PqN^ULKqcz(HZ`lq`^!^NZ*4l3AeYyWW<ly{*q<8yCI4&HHy4kAu)^bDUngJn@`q zaeCbV&1qhFpgYmwaN6@ib0w5LetA>wO>tV=z}P$RNKbhXnk$j40?#ObJ{-Cdy)B5- zmEdvcP_z_^)1l}lxF(!oKP%i*sPC!Ep)cw19r|heCNeED!O90lEn4@$10CX_ollI5 zyt<2hZ@Q7Ld0m)KUMzUaah{)L&6|Jg%srwr*V+dn!yR@aptTmw)53)1{eF`x7Zh2 z*K$2OX)beYZfo#HW5W0Hd*?Ji(7ZL5wfWXs_oCK`#?1MwD{b(olC8+4^^fH0>`3PB z!ovE?BQw071_#G0lLN)8#s-Sloq3=m?8-_JF4xdj6*f=teov`8BAoBwz3KHf1iYCI zUhvx8Gq08LtO;Y&i~Y&UojA+#mt;(S49>L%)#1x?FLI(6oFl`)4?WSh8KdS~tM_=; zj@g=ATG$Z1b^0FG{WQ*7tey}2`S6tuKOemDsQ4S-%!l{rSq^12?%$>j_1|ez zZ9u=7*bZu%f$1#yS5z(o|D7q&hzs9X(S~RwOr4?+(U*sB92{RKdIPsX@W%7jsR?+V z(bsR~_2|Uj^=j&1p9cGral!sP&p4LQzy7C3wZ@oly6>TV^=s)h8yl_vxfh=AjQ@Y> zHCu97vufT*-+)|CZajWJ#6EY|EnnSp5qpp$rl)#8bl!b3JJJSDN^ZSsPmKDnp+Axp z-*%5xKD6>LVXNB<9u3xwz50f}$N0u~>y>+A#@H0rk+g2UyWy5E@9EMgyxA^f)&P4XiH;y0CYd=frqw_LgBPjdpTk`H4~f3np?PklAAsoLor zW4E(Cp`B){+^E*$)E~U{D|=MueA>TSax34fo~yBMSl`3{TiaWUt{_5HDb)@f{MLFSEj)#OF~ z)GwqwdQo!OAHkY^Ci`yuZ;uqC3|4L6y{SeYqm9C@Z}SKw&n}Zta@3O(P3ov zQ-2IveBYf_7OZXFIsyFW(C>R~e(_=(bl=}7ezE0$#4o-A&g1+7{q6bu;vDZs;up?Z z^CM_h{Gt+mano>qL0?AZ7lZxhETh5xi(e%4KO4MCMroi8tv61anp0t(A$MCm>0#4o6eau&ZhL_a)yATFc@7%$MgEE=u4PuO#w{fxZmq6@z_w z!QvI!3A`fP<`sNv^9ugwevnsWS6_O~!xpdjHM~Oo6R-I9A-p2Hx(i;RdH)Ld@WCt2 zFz00>PvjNf7{V*E|3|!{?dv~%=nnh4!ARL;Qj>7A$^|huq`vi+cEl#_#|#!~m~w#*g~GIBfj({-S@& zTbwBsjPv`SaDRq5BRVMnhe7b+@QSneA`;g$%QL_yZb>N{om|#%wEsLlu@T-OKC!K< zzvAJp{=xZ-zDN6vzeLaT#V6wPc?$csk?8ph=PC$R@+ReQ{s(fpk?Gc+nDzT8>xiD! zBRhzG(T8sCTIz{(;&=M-53uRL?|cR5+gK+H(doU#x=N0NV);a_7;QwJCs(1v+qAxY z79M27Ja*U`H)fCr(dP@T*$!_bhIS~7@FNRG+PDuG)rUIv{lKwMKFZo}d-fS=_&F2{ zF8nw9j>{Knl+n72+;G|(O26>nLruBKuH=*$dUw{d!zHEu9_bqjuL(EKG$&a;gtNHf z4^#0DIO7a~37^F`#_8i*;OK>98Ruj`;a-Hxw}+tSH_>bCOp@tqY56F^roiuvg1yZ3RXjr85Je+d+h81JKgn9iAeAGGb>({{kJ#=;_s6+KhZ_-5E_&V_O7SE&~mrngB+^fIZ z`#N@$3hV|8`8hW1LHHdT8u6C%6c3pZ-H(0Ov8(z<I=Z3Bd=!~!OOmwdybXy^8 zMpaXkx811mGfvAmhwn4Y@g?Zqb;fiazH0J~sp9{=*s){t_7xYPBcGzVfDfnEN%(_h zj|^G%($vUs-$(fcXiv`j$2gZD66(T-{M^+8eqtY-wfz>lup#N3KKoq3i?2=0ZIK7e zvcpW_s@SjhZC{6)PUv&EkAXF=33f$KrhUGnC)K?O7-p(23;y_X;iiu9@?08dLD$j~ zKo)Y&gnWk_cd~iZ;saEteW!dM;yx6jtEtp=6nNw}RR^E6-Y>Cj#$C{?#zF74NoO&& z)H=ILFl%mzwi0ca7Tm+~O<1}?>iF!CvGgsk*BC(`1#y3$3*n76(myTSJLfoJUk?Ij zIo}53>&!^s%J0MZrVTr?`tc}s6wbY9Re9xJfPQ9%pbOEEHD+sk>4{@DSg&u$SPnd2 zbMcV!s@rMP%XyQG#j-BOMQ#5L{kPhLAM5|w%mMk*)lD`sO5*mNF#C{!x;Zn!f#u)m z%W#@%_;~Hkh5#rn9CmKZ7cVbi|nNQ zNBN|YgMQbo{7O8=3F)v4h|QM2`UlX>gsSYVr>A-%r~hQMNZ&q3bmz_`FW@VU%+tu? 
zeB(rr_0s3g?dhIqRCKs<;h_^azdcuZuPf{v7q-u??bihzx=UWcj#RVqz~;sdD}HrC zkMf82UDk;^N-?V4~%HZJzThc^)860Y(W&Bv!5-S=+AqAi`}zSx~?=m+cNp3%t|EW% z4YcLdXV<0k;fB`bz`dBbA;Eov`@(RaUl%zp-=0Fg36&@hO5)g(7d+mVpVG!& z))F_o%I;GZe&bmde(rWzKL@Ojd!18ZhU~xm-1TfpuMxS-A~)%^U(LZ zcCL}c_K9Dh{e2(y&rGF%=<43Jzsa@E20R~LyL>0*)sEMlaj56!KugS(dr18E5bZ2> z=UBeA_c9-#!^r(pjmR&*Y4mD7AfMnnKcPpkB3D|H7hg%#Z~ayw3t->7UM-z=kS0pB27E~^KAFW;y)k8?e-kAKp_r%}UN6IF~~A9gg$4&}bcf7U(PkG{QP34U(!c{7l~d`3of z20EFHZJToMg&&8(RZc28ixq)ZKkHE;@H_eNB*&E!XLbxZM)~t~7VEEw_a#2sznX8g z2Cj!@Jhio(yjJcJJ15=-a@=i0NA^*9@t#JGQk>5z`mquoyaC&mWP}p*Wa`UH_@gK< zUYO^b$q}SAS3WNPtWW z=(SV$6;72eC9yr}{wk^ON$Pupcf`&0iACZSSXFd8l?Z$|=vd<@-;D;WfvaP}^tn(V1k@eUY4v7m^pXB#8 z()p-~{N*a*h9&1{-q^CO?PC;D4BPFctwH7t%5rPpYCacVhj&5zkmDkp!~9d`)tZ7?%8~Vm^{8cwp1|!_I^gb zT*$hcwSOL+#ktfLZjvnA=sedc#=3It*Sd%$-3Po+CDctfZ2v%fAYwa5j-eKw68BWX zkK;FU?*84Dzoh6}=Z(r{-e2ow8D_pN!Z(#Z_wRHS>-#}yAyg6yvP}|tnp>t3MS;W)V@Tqc_Y1J4~lQSVoEcjjn9+&Lg(88Tjw)Ivho5-L%PCdw4FnT~ldk zQ@_Tq86Ci-)Vay}KQsDIw24feS-Z}7JjlM$!sB*tMh{zRX!k@^8| zMyFw)=UmCRTIWvFhZm{K*|#)sFMUKlZJ@{KTLh0l7t@DMZRhZANZvHP| zj{sh+yi(`jwFPG8A@tkUITy-9u^3r|HLPzR?L3(PvvL5@f5t^?4d+a^+cc>5@b~Kz zZ3)Irz-ZIa(Eg32+xhQZ8t;+xIpkr%Us?PDnFXI;tv~YL%N&yLXcn+nA;)whbEsd# z&+t&5Cm&~$Oa88;oF{bJ%H6ab-YL08dz~#G%5UD7sk1wz?+H=PYi2d%ffLCd17ij6 zaPl%Z>!YUnw}Xb`oGQ(#>rYg+L-H|j7NFGxTkV2zO1=|Z;7=oEH*qn zCUv?h^{`CQ2*%;yMFl-RmAJpx}*A?bt#K?&YHCa zJ)L-T@&Ts*SQWpPz9b>;LDkzed7OM9dTk4I;R?I7IPjh{B!2f zMHbDLYwdV|XBxxKsev^bvrc@%rK553{UP_K`@U|pcqX9Z{ycHKBIGkb}5>+_v! z3Mgce_ac}@u%bW~5eN%%UuEgv!YFez3r zlzMTxuik!kH_t3TEzSq=?RVF75nuRip2he{ZugjD$_m(TptoK)-AqY-8{bU)-ZF1U z3lyi~D^x}dMYt~>`-#8Cwns|udj_ANz2=zeb=GxsPj}vT8h>M&hv;MGv*f>c6Tio{ zq+H-~)huD3)P~=6%ET0Zzv1n1V_%n#;Zf#MvsqYHMt^Sb8f)aEIX~4{gWq$Ge7r-9 zyL`N>*zeVrUSBhXt7klLBZqm7dVIjQm6HdqFzhuB?Y${JY`_#zO0fWGrkK zn=6sI*HE{^TW4}FeX(SOO_Z&Sml2KKE;{5pwduq&bpH9>KO6iFZ~rCcpz6kladRcU zb;G~2%R9PY&T(~dPM=j48?$0@>0dv4+~dsa0K7oC211mfAJzN8Rd){g41pzt-?N?t zc$R0uHW+I$2-jdcs#A5pM7`i+7IrG@f4hG2#H1P55`V&$hxg3i+<2eW|B>n+Yuxn% z?}vYCZ!5gvA?XB6#{0+%wy$ERdu;UI@$Y*lwfD(s=rEI5E5tvEjcgC1FEN%}7cpNq zuD* za@zRH$tzQg+b%Bp+T^cs{bzpv#_yr&HIu_`U|d`I(56YtCtrtuVdT2|C$I6Zn0#OI z*C%%*8@CmuteKqkxeDJd?w>5b#h1nZug<;2*L!@GFPHDjQ`SyiQ+B)W)-T`TtK^5S zC)0yJ;t|u+BRiYtzRvdXUgI|4il$NjcR8Q!nvZ>L@>Q<8ejvQbR@(N$S39ot7`LUE zx!0zRzk71q?yq#Ly~w!jZhRIW;JV4YbMnF3y7m}2E4cE8C&p9H!M>Fp)6H>ZRp-Vn z3}crnqD-o39v1FA=Jd#_yA*8in5BAFjsHTmt8iFckxZWTh!O3xBJS~*Rnf& z;*q{Q_sgQCo8C>^r|Y_2m+@}@?QJvbK3?+2iW~XcT{7!?H3f58nm?iUs}H?a(zNiz z=&AGXEP3hKUq154BO73w`g@V3Nq==E|9!_dt>>km{?F^?QtoC|Lp{9goO{(DOFq74 z)}M;s|LP}Tfb`3`eIZ8TwszqK8K*tf z@THG6CZ~_%Ov=&5#l&V#N*C_sqh63?+%nztZ1fnJii>TIAE;|rIqQGEP01U4pJF`NeuQVv`$Ee3%~8>m&Q0w>GvztY z^^>giy1%l-;UuBiu*w`=Fuz5PL3#P(&`<4CuydKK$@i~7mm=;l0yX+zFLd&rOQWvn7 zho)eIp)Si7gDjr7CJ8>)R!ibT>oGwmvgEnh!4!+;@`OL%P;m#*%mT zf1zW&Q}XKwI@Uk3@6}!qd1OQ#YlU~Xjun`IRWSW6Fa_{;7=)QvbaZGA%(HlpZqYv* zpP{z@p-yym9NytN(O&|u=21r+-r+jYC)oE5o{bWRCk}5K@UR}t42+=D9jZ5-lbwAA^D$9Bsr{n#lmE#4b;_Iqrjc}_-=K_iqSCtrs7vJyu8XB3ea471w0yLV zRP*&h`p*H~CoW55wEiGbpCk+c3CqbI=`8EI)PF89OaMHISkT697BI$=Uu^` z2#*kN+C)BK>)V9*aOJb#4{uQ&Bg0biO7MT!ZwEfX(FqQ0UDNsSI69Q!XQHUzp06h@ zN=6`7-W<#PIlxtoU<-`^@U;Mvf&llxrl4b#y#DbMC3tjfc>svtv%_zbVgkEa36&+ z_71Jzd@9#wt-r5ao3#sk<=U*B?y2|&?p`<-T<-2T@2Z!(AMerx7QuQ8A457j z9A?Zu23=A6v})lIHk8YV`3&l(&u2EE-0J?de~3|^8~gVOL zJYg9xFwen@#*o$pJ)Q>NBofC9gZto}y5D&{=sK$QHv-rCPyFSN?~`vGB2JtL5%*l- zrFnzlz#5}Sz`F!bSZ#?H;M#`obv!>WlaW1L>Ofc+y~ZtrL)LcQLm0Cb9F=3-O)bSbS#<*DU!iFPaNz z_YB27c>ON9Ph-V1c)f6Zx5t)2WoRokWHjSCaUAcQYRCH)zUwbeOhy@KKV&!JBw`+d zwbWr3kzaq?3DO&J-0y9oc$5Y<2R1c#ww<8&Aof6=TZh 
z|6V?SJiCK*oS3hDjr7A$*x&MowZ!%(hs`Q{iIH$AM~pG$##%_Xao3B-6L?niZ!7HOK`uMh;ev{ zmbkJRHoXM*tMqO&y$o2l3l9@x==qajuniiAX9B66t&rKn#8@lV-e_&m7e4QT9#}Kl z`4z$xQ2gFzUH8Ky&+FlY?h7E(!wz7Z7=Yi+BCHd95&A@U#(6!`;74KF47L~IyNefe zxkH9eb8vj(2s?xM69_wZu6TjQAX<~OcI`jbiFBx}NasXUxHzHb%jr-&N|(Yq;ird( zAB9I+Cq@qyC$2_&;Cfk0zX#B-ljzq25uVoUsQ=NmTTc`Aa9M4aB&}Q`8w|cZ0`rAsH1L@(z zmOn}Bx2V@a;?}U`PhsAwzmH-D{ODa7dbl5*db$tbz9xRI?S)!{c!BPH<_@);c*20O z6YJN(@7TgPUXOc_`ulL)gJUC(LLBP4h_OMpAw1)$e|M%8Wt;Yfd}ff+o`Q2LKR>9h zH3CjuZzE8?VSH~({_5I#HtgnD$beyy+SzvuU1xjCNQV}FM-}dc!!drRY2mMhBOa~s z(O5?5Mc^45tP58T4@ghtqsBYqMB7gKO;<#{b;FFIwl#5BFSlwD8-xSTk7Mp0VZn2y z7`raF4gIndb-}$E`egW7#E*jGIiT+JUIO6}_slF7_mpC90`riAD*fR1Eny#{)@=~? zU_!nr5o0>S&ZBE%0sI^W<3C9DA+9lyJxwxNFT(SsJCFU%30o6A)LdSlug>?sN5m*> zYqrkVFpt(=9k2tvUX=y1UMMsD=CPQ{KN{b-M7sSWjVo>7ln0d+zqL4scLrkqiFY)1 zN4>jXt^KQY_bxY>FL7!0lZ;)?pWg+03I&539`63IW%if|%j_93#@Set_yT2JFr$0@ z2Q*i+$Czh_<9sN6H`x$>IgLYB zF2it?VLAK?CJ);nCW`}$ARE_3o81}pY7_O7!cj-q+r5--&yC|UDZOkQBM^TMrJWiV5Amt$5}Xxl+OUXgL~!q^Jfm;hxaXXyCY8PatA$gN$i!by$hA&%-esaYrHUEMOe)WAt|#3%cT-(mLR^hcSj67>2T5iL`FV z+VD_3(|s1oVAr&{*u!D@S-y6q@~M3HAzy0KX5>xvQ2mYUMuYvdn*Qr;CmZ?vKe3$@ z#QU$Zow-QMDf{a|yxXJ~*$(v&vY%ri=1a!Hu2erETcP%#w&=cbY$n+a*%sN+NVG>+ zE#iw2up#OE25xV%i{W^7HWK%O(EojjT?;mhfV?aE)D!4a zR`jVE$T#X?%h@LMC+Z_nU9_{iV2m8SF={08Kb!hW1US9hd(SVwFWAa`jmAk&*E=?t zksj4!1^Szd`i1+HY3TFA(dU+9PkhPx z0V@)<%@5N!Bd-(k`6;WA?^aueXKQFpUtJFzLcFcQVO+yrxNYZa0&_|&VuhOfQ(6=b z{k`75CZJV%@UGb>ZW8xcPHt}S#^QbUc-EY_70*NEDUo>q_2;g z*kdGrJ!~J0nRw3x-Fqv)t)aqf93aEewO9Fv2lzKbr^-Jq#2xsSK+TBtYPhfBP@{^kj2rVZM7wF?Xe>;!rP5MgC$!NBO-PdOTj)6Fo z&b*kv2-Nj3o+CeAXLowwuudRY7P?_ocJcR`*ZZA$OQGssmvzdA!0+;h(B zbOyN>&sk9&PkelOJv8Gw`r{eo9>~8wgWL`Ir)QAUb^4<-$jR0VFpo<_xVO#_&I8-H z>I`;~2wm(sLwwaAE0IrxsLMW8pA`Rb|7$c5g$;6Y(;2U2%w)Wo@lnP)#)FKlj3*h* z2ASSS#yH078TWURayRphjE^%`Gd|1M!1yj>Gh;iW2|rjMH!|^KjSdQ z@r*MWuVzeVT*Y`J<3`3Z#%CGdX8eZH$n9ii9L6}2aRK9U#KAHyc+c;rj#s}{`kPnCa1Al14(H2{Lf?s>PL^m7JQBGZHt{%IYL~zv!+mDE>># zSuFN4$DCDnjm%a0P0W=o9%3$gA07q?;w?epTZ80Z1@X2Z-p*XPef0dt+xhhYW zAh{`s_X-Mc4&wcSxP>|HHFP&9NNx?{BZ7Dob5-7PLGtJz`NSZ(El55!NNx|}aX~yj zDE@*Vo)E;Z4dRJGJSB)bgLpb~r7u?y&kYKnAH>%MaW`|-zY2r6hqWL6wCWY#QZ{zU(dWR^JeB2=EjXOzy8dv z%r9ad&s@cKG4IB5H*=-`o0;Q2P zaTD|IEN^43#?RpK5?}q%^H<@`%vJi)%$0q`Ggta?G4I3q1*dOhdsO3F@c0zmzk|mE zYf$`P`!D4DdvpFvmLxfHQd5-r#F8b+DXGcnK$jyY*@2}O>6R?X$xQ)Q9!r+wI@2fs zdn{S9)RB`L$XBH%=VoOqAEcYE$dVXVT%!^9{c**B6B6OjfzR2c&t0kjSk;;+E# zaje5}5hAU_$uP)=Z zyT+B8wNwQYiRoDoSaNb5xoOFkWqFS56kV1`O3Pg454Vci8(M*X9yJh45@mBqbs<7< zh{TlCrH;IGn%!lpsFvjPtem{;R7xU|Y{8Lz`uLoFVDz+HsdGKPFMnU^fnw-EHz$iM z{^?R7eWhjR=0OuFX*sTRM{;ULYG$q_!{Ji(Om$lRq$pDF{&0R>S^Pyx^vlm!q{QSb z*P7EQjr5k6l55FM&CScs)SD@Bnq4H$Td-h;UJA%%yZWr6BR_S*WdF*pWrL(DDiX6Y zGSe(%Lpf<=6^VW&$(|C}s8qgS`8C&d2$7iK$aE~j9m1a|H8|WUl}h>f_4)_R`L*96 zoYH@go&!T8e9D?kM+Uk^T4q{q8b*$_so9pLS=kn47fOzM9J+}KC|^J*q~ojS8ME_+ z`hqsCiAFJumkThi zSf-}R+ce+sAS_d8*oaMcY5rD z?o@tiE)^VKahlr)#-n>DI%v)njPTO^{|qTlim&pf@Z`wvUvX3>BMx=nLFo`z@d=5O z>ny>?iBfp-SNSWXc;v6r!Kp7kohzsERY?B9`KhqMNa2)j34=L>50n!Rc2q`+r|K5S zm46^oeB$I(oM0}ce!1c*TreuRLM0DQU&;T>UxoV*o?Zi1kty;-I{q;6g0p7krKihN zox^VH)m)xD&IhMHsvMs6=)ms+SM5e&=y{GnPS1J-a(dPyklz!;?+xPgoJe5!9fpn^ z-w^E}@4^LD{V|+*hR6}gB1>e8RQ!z;DY_1@o|ct84cD)Dxk@KQyu0H%Uu_UK)=9aY zv4Phi6`IdLZ=~Fp4xcEw@^^Fk zid&ega7A1%C!_Kod992wGC39EM(2~6 z$1;xxQk;40Kc8_ikkY>j2j!iPgVM>sVZq_T(GN#94l1h~2bFa_#4#(GBan3Tse)-sC6*`LwL=w>WrY+`gkV`;eKqy7~sR^vPqsCrr? 
z`k6!IqQ~Vzk^zi_#z*77JOrd?j=vP~k`OOjPrqYa($T*XKRqDdG01z2Pq}{{KOJL6 zGIB{rIo9fRNrMm7QI>_$8~tCBUKZ+^gS4r%q_MypqI^00S$$p56qPs?Y2_dtr#|92 z&<@l-e*GyO{8@UGMz-Fn%R0nYCHnLDYoM)UltblC8dI_VT>m*@4N6W{wG{OVOhan@ z&+?ZRnT=YgxMX?%dj9#k7NreS3bMWZDV#cg%Fo{iGQskY4q374BPpoO>Qj{I)ag;& zWTZhYkqeDPA~n(r*+~X8lc(#L`WV?}$Cmcjm5eA=DP?jlEeUTSc-`GNj)O{MrVe7Ehf z*|>qn0>*sYEv983b zC9gqgc+#{WH9I3M6YFSMnX^){2bn@)mGgMy^+aJAbA?{!3b!#=SQ{j-50W>UPaPf=6{RZ1SYImZ zx0kVjaX(`t5=w>WptYxfcY+`I?Y-Ma?Y-bb|vYbXn6Qh~Y!f0iTVvJ_AGbS>+7*+ai z=7o$N##+V(#wNx##?URYTwNH=j8TjUj4sCcd6{BXR;E~hab+>~1{OH;#PsYmF&~qD z$Wk0@1j%)z_CUBCA_$u)&&U`4&b(}0fOEw6g-1Gh+0165|4mO#60;oH7)P>!Ys5_C zKQk{~OwL;-=BK*E9Bf$3&RQj=rph4^aj-VOhT>)GCrBUhlbt}Mkw)`(q@fRrNC%~b zf8MgZ9A2>5D(*U^uGon1;bp4tx5T34zgmhh^ zS+h)6#`klXPL3~xOpmUMem^9`>C>#)AD8@PettihFU>6ceiUvdN|33CPsJR7>ft~b z=oZ&eLgG}ynOsjoYKzIhWx)9;kqbCSB4Lz&^p9+6!S($#Go+_m?9sWs3+3y2S zIGHcaAElG|r|Ef-kIY|Q6BSQq-4U1Np{uzx2w5&UZ<6;F6iSto=HN6-6GE1kW>?Bb z560#1=sy{yBp1&}=dPL z!R>c!xbv=!cNgAs?|qx@f8fEQhaP_9(Z?R&>?tlOEi13s;@w(VwQYNKP3;p;KDFcN zzduvA^V#Q~e__{)^}Ao%v-jm!UTxU-+CTQc{>Gb)2M)gV_B-#s*Yy4eA0GPX1Us}9{b|UufG1~+qUn%|Ka$LKmFYP%Rhhp?ZnC7g=Po|4eQj|7#?v}m#$}b zGj;FLvsdqP&NcVxdtSfuFX(R>Fz~`bgD)Cl9Xf3Ih{%znqDGGyJMQA~mqcHB*#zuC zU2bzE;Q@rD%baP;SEOfTX1P{o=j7(CTAjaU?KRh4cm0yY)64&|?uNhq?SH!d|I_*Z z+xnk8C3fnx=~vii%)Byg*6ca)bLY)puyE1hgsZNW`aiw={|No(i52=7fJZ5Gr!ktI zm-9$kqaN&6i=5!BCu8=vc&5|o(-a}I1VcwIumw7MdwanFX zLUqh(jhI|Lb6O)O*TB4wq@t0zdXB4!xq42jnK`XZlWS#;{Tbc0F~5L$J9G7%oOnUD zr-kK4<^!0Um=9!Lzg>pEkhz)VgP2>G4`yyx4}<~HUw=JCuYGf!k5%iP6$D)R#7)0h`ApU&LN{0ioE%x5rfU_O(16Z0#X zw=$1o-p+hBbK^_0{&Sd{na4A?GM~phn)xE;cIJzjCosR7xs!P+^L*wjm=`clXI{j7 zJ##N}jVB;=%tM(Ma)0j3yn*H6%$t};FmGkvg?T%36LaGpsgIt_&CGi-w=zG6c{Fo; z^j3Fv=I1j{WIlqqi}?iR1shDb35}D%oCVvJVA3Z4`rUuyfgCx=HbkXm`5=8GVj8?j(K0^ z4a_GnZ(^?T1h18ODD!saotYb7mh}&3Ze||A+{(NQ^JwOMncJC9V4lca;|ZgSc{p=7 z^9bgJ%)2o6Fz?H}miYwc4a_y3P&P3SXWq&@f_XdhzRZoU$ofuTZegxjWcx=k4`*&; z9>F|b#b=(V;t!JXT`E3vw~EiaP{n8NQSq%Zeyxhnyk5m;-l)Qlkl~wEc;;;?Jah4? ztgjX&!<(3gGq*60U>>EykCWkTDm?Re6`pyb3Lh=QyOf-{TgfL%d7+Xs_b9nd%4?OJ zdA;IOrMyvbyX4J^$4TC%c)aAILDoNXo#ZCweVJRB%T;25Ra0687J{CgAg9(P?YwfC z%xjvo>P(LIWy#U*tVCLUCN9@L={XQ`OZC-R%=bCGTtyd<%5bzlPL9?^$;P28Y7p=<;mc3IG_P?D>xoK6+zC)@##4naw|Ce3QjKvPaBZS z<8*U5e;22-jPs%0d2+NuPHu(13qa3DDSb$J2Kz7L`jMVjAUr*nKq2WF0;wrVh4$s? z*@yX9kC%?@BT`He>i&o9BvMTCt!tBgM2c9U?nlT@BC-0e?tjQ$h)?76- zsuzWegcT{f%h&bd=WF%&eop->Fg*3MNLZ<=mwXCoy06`+-%&kOxij>7&hpihp4-=7I&2% zL5?q!qO`D;s1Ipo3b%}W3d-NE8G1X-_UT!UM>E7+&R>ovvA+Hy_u2gAqw5gmmcZfV zcrwM;E_B_Y@k;eKIX?L1nR@*L#|H`@A7Ce>zxnX#Xy3A5`^T}seh{mVwds0^q@DTu zzqGR{K79w;O<+8ww`nLlIjNUvKKql`ZM&~ONj**Wm0Rj*vTxjydh(CsQctnI{+FZM ziL7T}Ib`|$^;G4GXAR48#rgV;EZ2NrIc2%x0?I-5=FeA_>k41FRk>7q$?0j;JZ_$a5J1MGnEn;mf7Cm!#szki-D zuk>UWfljR(t8w4O>(V1Qkn3G}pCM=4ct(-;F;p6EmN&6SA@dKIdze4Lyq39#c|G&H znKv?jlX)}qcbK;^Kge9XC(E~$xrzBs<`(9AnMW}{#N5VQ?GwZ^-_Phd= z!1@}>+|BYwm=`f$!Q9N@2QaT?d9sqTel_OxELZcuM&>uLyqWnk%q<*W?Gv`MT=k!5 zmWOh9W0TaMn%|h2-^OwqhYw?JWw{!s?JQUOiqR}D;qZ3m_b^XjuEu32^QTyz&%BDc znb+;re6fJ#8(1FC`FCbs#B#MylfZH#%e^cwWnRbpb>fmS4oYf#pu-PHvwF z=1nY*WA5hoBbm3dJcqf9OP}@>pP0&1uV~H-o)|E%!^oF!`#bU-JiKR zd{>s&v3w5mc+Sttyn*G*nVVSNpLr9@9n1?j{j-_3vRp006|%e=%iCFgKXcWCz9n>maD6F5z9xjJeuYC%+)^hWz6j?zmd7;d0C$B%#A$0Ud}v$ z!{5%_$$Y+w&-ssGp3m|%%%fR8n0W!qmoblG`5@*+ESIZjLU@^PWVw^W4`*J-@(kt; z%x_}e&f(SbGfgbNmF2B0pP)F)Z)RS|@+g%a%O7KI{7C9guHNA}9Oii}w=!SI-21!? 
z--CHH%O7BFXTE@W0`mu%*K+)x%$+R1i+TMEGX3$)^I85^=5-vu7xMy^FH-SY-kW(5 z%NH~EGB09Y$NWy_4a_$)Z({CMa!&so=B+GGVBW~`bD6iZT&^nMy@D^u`b}iH@ngwv zVQyx=g}IgaD(2D5A7$Rm`SoFLXZbbEEu7y#<_RoMVeVwUi+MitJ~L@Wvcv$MILd9qPkP6TFoyR>GVGFQ*3*qA$6eh2eLjvviDpXF}mO)S5Zc>&AUGjHee_G4be^6Qy< znP)N=oZk7&>sY=-ah6}qyn*HGn5*ZmEX`E=%W%w5do z`YintS2}sW>%e_{AWy<7Zy-5!`wXkRK4 zrB~_8eHgV{NBdT^{y@LCl}@fpB_Rd+4X8gIxnD!81JcR;nq;4zsVtGuveJ{>ze&Mr zw{&v7O#bGDd?fe(&O>tg9~ho~h24>>>!4Z>&fr}qxgWI5xB4L0Z5&u7mQL~{UwI{0 zyOnBPDA`wjDfib$@-$!hBwy}pXUQ{z(s%jnQp(jXoXU@WrxI8`zn#nQYkhVo*Ja4s zRCrp4p6Ke*X5Bd>QO2Fuc=eH&UK~-wp@LGkxtR zF4?ETFO@iq(|%R^zIK8p7uW|lyq`GD$S=) zxqqm3ooU~h_UqJctK5eseXI23I;-D)6j$SrQ zt4{mtwEw7f+iCxr_UZinQSK9}U3HSv{*%g$IE{n!Upl$J6)2be%`YdO;j=%v4@m!| zll#N!=>z#ZLZJP~{UY_00?8@=!0?p5e>{-;dH(Xq{bu!41HH$D?45p5C!OT#X$O1| znD@)nh@rUZ@rtV+Dfg?@Qxfty3Hd8}nZDfb^Or~N=lJ`N&l%Y6iu0w(Ho^&fhh2|d#wC32tR zkJm%#p98(0D!lX$ynf1kJ(WAvm+V!QmN@PE(pwKyd8iB&LXLoPzt*pB`NohyPW%7r zDHVF2gKF=$ANf20{g+PeAFHQd=s6K;5BfExbnv+K>Y;z2i8~a@2^5B zq?7yb{_@Cu0~$}{2;=9n2Pv-NOYZcQM{;_@gLIPT_{ImxbA8uOx$p1KpQ8!Ua@GQa zPE5&>*T0I-bLr0Z#n-vh8y_99xeHOpk2|YgEW38kcSDoqJU`{>6XU}69=vVoy}$IH zkUmw$F}+`yx@p9er1!LG10O#3q9JqWp4Dc!zw_?f7Y*vS<)1Gv%V@|sl(GHGY>gu9 zej{Z_N%CvvvTq0f<-qH%Zx65NAM?dyOXNhmCG+H-*OzyBe(9M0m*P_e)2<%eesJZY znJ?}6pfmPJ?jGqi#y(lOuJ3@2wm0th;jB0I-aO>2FW2qaG?b?9ApCvF->`HKNv?s)FpvW+#1wr;+n)A%Q^ysml4 zA$LUO$u`^j8@-M0zAG-o#VI8^Zp;I(F23oVDKlFf1Gm0E$u#kW>>ob8C%5-i^<68@ z|0@0cVWzh}Lu#g5F1W@qCo^r>U(EeJi4PqQ5yDY4Yf|K-J3ss?Ym za@NsMu|9Ioug4n)&WpEpd*+AldoJ$TYs#S;uDkTzhx%OA-Y@!I@oulDZr!=Me68c; zGd-3&u#(x*{rtO@?O9~$zv;3KW>ACz(KO5?E&%0Ax*H)YN%7KBxJojF;|- zo%`m#`-d#7`sCdqpLrjxcw!Rb3ERxZua37BT==(_Ga?eQLSKyCb6sS+-Prw`eph+^ zX>B|H`sL*#`*>Eq`}}QF({9i*e=$9_sPCG*vb%*i^5o8mc?CD#ch2RnuefZ-?H}Lv zxN-WlS~5YUDR{;;0NFP(t7ik54^Q{!i)>5Bl}-riQfLefj9nn$A_fIoQ0at!sFJ z_vc(w|I2>sZvS-q#^kF^pa0{**0lF7ipg%-acgDaH3Kgl`)FS0wO8JA?25?MzhplA zx$UfT_O{>K#niUs`|jy`22a`W%f83%IV-1T-S1(yM8?*pCJt zpC;9{E-z>@eOmN-!^;bAH!b}8RgG`&`ZZznjddY?rp1L^x^GsWZQ|wrpVY^#DR%Fh za3J!DW9{SOXba{uMg-F|xN9!sxF@(&!Ia$xtz17mtGK3sFh zl23nJz2UXbORt$XDrC}}&!xU~}9?Fi(6?L4Yt)~+M(RYlDCI_b{8e6{n90k)-Y zi<^2Ef0lQ2|Mx>j-MpT3({k?WlwW^*YvrU>o@1kW-LN$7_}G!()%~!sYqy?ho8of3 z-7ZSB_0%Rt6s+7oAhP_DZ})w0$Bxm@O_?0`%e(1e8yf1guus-~{ouU4jy*}0O&{F$ z@fTz39L}~)FFd@z=kj-U-xaAFjHzeq__eP*KJ=X95tq;S;mwNj)uk8iD!cKTlKrEy zO&9-d>SMd=dcNeHJtl77_OXvY|LnWeWOnUFg%A!%9zZ0H3 zH~XVL-kRxG4}15GLxy1|Kbro=U4wcK*?#cCXZk!Bx_9{gPS3pdrVGA{W5b#pR5n>x-aj>-0R0bocZ$TX9~x^JjL3u zaZUB0kI$w?N}0U-;evM_8WH<`-_PA8BPac|V$zx!H^y%Le9g?yP7Hk5^=|XGZ@w1x zUfEqQq>GfJPaT?Fo@w+JKR)g5+=c}ke_Q-Y?*6y;)J@4uet+@)Yo5ON*vX#hj=6jL ze)#PDPd_&MyW;rIe>t@BZ^@m9ZyxpWV*@EaVY>Z=>nAR{WcAVL(tk{v^Z3E_BOd>` zEcex4_B{A`zva;_OSV0_;fK!u{HoxcZM(0Id1HUgEw0P^KlWBeWNR1m4d-TUO1u8Z zh>a=sON;OC-rQ|m!o+iTZ2HMQ3B+p)G?Rir4d0 z{|so#5X^%@!-W>wRcK*7gx1L{44p0zhR%b9!5Aq*j2DZL@E8#iVHcqh^F-)b*NM(eZ@?I*Nu{-__;sUHTrOX#YY-_t=Md_B~+kT$%{aE+EyT@TYd_s+mo_T|tb^n-b9hFE9;{0RBi z?^&Ru=d;Ikv@L0s{!ybpA-TAIjgHO7>UFfd*ZHWPzIC>a_09L`m}q!k$L71wZ_(4; z;Ly?5UZ$gH`dmj#TlN#3Gh==_{?HYN$E=8H@Av)jmZ9k}SDkm*-f*iaCTjAk z9gB{~#T1SG`){K>$uX1`XNK{LI9dSEpM)4c(R% zvun!U>RyjrA9K;l*&(ght%!N+swa;u-klMX^ys_G!^0-Td=&TWKhGI(ZOj$d{4#j| zlBF^6_Pb^nAHF(f^^)mNT=7{(%=x$e{Fn2#To;r0VC}8W@zy0c%d!FvU%QPoB=Dsh_dF#ld88K7#ZtXK{QF2V2(f;H1PZq?i z-PdsKr+3D)9-?Ai9AbDbrZyv{sP@s?Yo2k$3_QHE>YS_@F-iRsWA)Z|8!N% ziiDUR+m3uO!agr%;+l)Uz4V`%G3i;$M$`Baizol+naG8B;%^We$MokGJLt44j&&Fu z-EEq1^Gqz7CuAJ-zYa|DJ6ypD=HDpybAtVS{J*O_z6}1o6KCI3*X#dQswTWc7}w>I z_!I%H78tQP=Db#ud{9OvrR7?yi56Td^e}_4D(%)pKH6?1UR9Rs7^H8*xUC(-B~ZY&j(&Q2?H&9U=;?`@0@GWdhcm&?g$=Ed 
[GIT binary patch data omitted: several hundred lines of base85-encoded literal payload, not human-readable and further garbled in extraction. The header of the deletion hunk that follows (file path, hunk marker, module imports, and the opening of `_msvc14_find_vc2015`) was fused into the same garbled span and could not be recovered.]
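The deletion hunk below removes what appears to be the vendored copy of setuptools' msvc.py from the add-on's aob_py3 tree. For review context, here is a minimal sketch of how the module's public entry point, msvc14_get_vc_env, is normally exercised on a Windows build host; this is the stock setuptools API as shown in the hunk, nothing defined by this add-on:

    # Sketch only: requires Windows and a setuptools release that still
    # ships setuptools/msvc.py, as the vendored copy removed below does.
    from setuptools.msvc import msvc14_get_vc_env

    # Resolve the MSVC 14.x toolchain environment for a 64-bit target
    # without running vcvarsall.bat by hand. Keys are lower-cased
    # variable names such as 'include', 'lib', 'libpath' and 'path'.
    vc_env = msvc14_get_vc_env('x86_amd64')
    print(vc_env['include'].split(';')[:3])   # first few include dirs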
zO4~=3{)wpeTCB*hbHQ=GWzVOr)fm!F#m<9{7$y-inx3P~VctXE9!ak#&aEn~usZd| z7|AfJhr*ew3m2n0UE3vje)@wp?>sT`wJrAi(qeB$Ns(`HWsXpcuV1fwwcY1Vhtc|| z>IZAqXj+jy&!Ua17AUYSG`zm`9H%-;Y#{a!bEV=`yv9^2%y&c)H$cjh66wl&(DxRhtEd zUS;SqdhhKODqrg-GcQ-~p7ZO&tDIzty+F9MtE-B9-tOAw_4c9EN2H8V<0!AlS1Jse zbnV8hMf0=faV{t>=g?GPTLgPS($%zAtvJOCR$1@kr7gmpEAtpkL`ts;p)+7_G2o}s zX8-&9|FZ>li2^!);#w4{a5-IJH_Ab&!om zNmFB|{B7`Sfa6oBRs`+F{GJhhXJJ=y7KQzD!!FCSO1}VC z@@5%U>8!?e11z-K2*3wOS*0FQo?1Z4To-mX@cVXLDc_@j z5#wK(q(2=Cz0y z?uEEF;|fkQ7IzqK*E?z2CAfQWhvVLfE4V^2?kL<$+)HuW{w+;&VYjlEwB!#0!o0J0S}N3%mk(bQ-EaPN?-yo7H|V2fFxiD-~ti>JJ9)O`UEfm z3Ezf$1ULxn1%3%U2|Nls1Uv|A12zCvK!1BrpG%)kqCT1Q`JGq%b=VaC$ryH_z)OO!z2Uq0lAnGi8F(51;AS1Uf?O~U+= 14 and version > best_version: - best_version, best_dir = version, vc_dir - return best_version, best_dir - - -def _msvc14_find_vc2017(): - """Python 3.8 "distutils/_msvccompiler.py" backport - - Returns "15, path" based on the result of invoking vswhere.exe - If no install is found, returns "None, None" - - The version is returned to avoid unnecessarily changing the function - result. It may be ignored when the path is not None. - - If vswhere.exe is not available, by definition, VS 2017 is not - installed. - """ - root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles") - if not root: - return None, None - - try: - path = subprocess.check_output([ - join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), - "-latest", - "-prerelease", - "-requiresAny", - "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "-requires", "Microsoft.VisualStudio.Workload.WDExpress", - "-property", "installationPath", - "-products", "*", - ]).decode(encoding="mbcs", errors="strict").strip() - except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): - return None, None - - path = join(path, "VC", "Auxiliary", "Build") - if isdir(path): - return 15, path - - return None, None - - -PLAT_SPEC_TO_RUNTIME = { - 'x86': 'x86', - 'x86_amd64': 'x64', - 'x86_arm': 'arm', - 'x86_arm64': 'arm64' -} - - -def _msvc14_find_vcvarsall(plat_spec): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - _, best_dir = _msvc14_find_vc2017() - vcruntime = None - - if plat_spec in PLAT_SPEC_TO_RUNTIME: - vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec] - else: - vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86' - - if best_dir: - vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**", - vcruntime_plat, "Microsoft.VC14*.CRT", - "vcruntime140.dll") - try: - import glob - vcruntime = glob.glob(vcredist, recursive=True)[-1] - except (ImportError, OSError, LookupError): - vcruntime = None - - if not best_dir: - best_version, best_dir = _msvc14_find_vc2015() - if best_version: - vcruntime = join(best_dir, 'redist', vcruntime_plat, - "Microsoft.VC140.CRT", "vcruntime140.dll") - - if not best_dir: - return None, None - - vcvarsall = join(best_dir, "vcvarsall.bat") - if not isfile(vcvarsall): - return None, None - - if not vcruntime or not isfile(vcruntime): - vcruntime = None - - return vcvarsall, vcruntime - - -def _msvc14_get_vc_env(plat_spec): - """Python 3.8 "distutils/_msvccompiler.py" backport""" - if "DISTUTILS_USE_SDK" in environ: - return { - key.lower(): value - for key, value in environ.items() - } - - vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec) - if not vcvarsall: - raise distutils.errors.DistutilsPlatformError( - "Unable to find vcvarsall.bat" - ) - - try: - out = subprocess.check_output( - 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), - stderr=subprocess.STDOUT, 
-        ).decode('utf-16le', errors='replace')
-    except subprocess.CalledProcessError as exc:
-        raise distutils.errors.DistutilsPlatformError(
-            "Error executing {}".format(exc.cmd)
-        ) from exc
-
-    env = {
-        key.lower(): value
-        for key, _, value in
-        (line.partition('=') for line in out.splitlines())
-        if key and value
-    }
-
-    if vcruntime:
-        env['py_vcruntime_redist'] = vcruntime
-    return env
-
-
-def msvc14_get_vc_env(plat_spec):
-    """
-    Patched "distutils._msvccompiler._get_vc_env" for support extra
-    Microsoft Visual C++ 14.X compilers.
-
-    Set environment without use of "vcvarsall.bat".
-
-    Parameters
-    ----------
-    plat_spec: str
-        Target architecture.
-
-    Return
-    ------
-    dict
-        environment
-    """
-
-    # Always use backport from CPython 3.8
-    try:
-        return _msvc14_get_vc_env(plat_spec)
-    except distutils.errors.DistutilsPlatformError as exc:
-        _augment_exception(exc, 14.0)
-        raise
-
-
-def msvc14_gen_lib_options(*args, **kwargs):
-    """
-    Patched "distutils._msvccompiler.gen_lib_options" for fix
-    compatibility between "numpy.distutils" and "distutils._msvccompiler"
-    (for Numpy < 1.11.2)
-    """
-    if "numpy.distutils" in sys.modules:
-        import numpy as np
-        if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
-            return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
-    return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
-
-
-def _augment_exception(exc, version, arch=''):
-    """
-    Add details to the exception message to help guide the user
-    as to what action will resolve it.
-    """
-    # Error if MSVC++ directory not found or environment not set
-    message = exc.args[0]
-
-    if "vcvarsall" in message.lower() or "visual c" in message.lower():
-        # Special error message if MSVC++ not installed
-        tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.'
-        message = tmpl.format(**locals())
-        msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
-        if version == 9.0:
-            if arch.lower().find('ia64') > -1:
-                # For VC++ 9.0, if IA64 support is needed, redirect user
-                # to Windows SDK 7.0.
-                # Note: No download link available from Microsoft.
-                message += ' Get it with "Microsoft Windows SDK 7.0"'
-            else:
-                # For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
-                # This redirection link is maintained by Microsoft.
-                # Contact vspython@microsoft.com if it needs updating.
-                message += ' Get it from http://aka.ms/vcpython27'
-        elif version == 10.0:
-            # For VC++ 10.0 Redirect user to Windows SDK 7.1
-            message += ' Get it with "Microsoft Windows SDK 7.1": '
-            message += msdownload % 8279
-        elif version >= 14.0:
-            # For VC++ 14.X Redirect user to latest Visual C++ Build Tools
-            message += (' Get it with "Microsoft C++ Build Tools": '
-                        r'https://visualstudio.microsoft.com'
-                        r'/visual-cpp-build-tools/')
-
-    exc.args = (message, )
-
-
-class PlatformInfo:
-    """
-    Current and Target Architectures information.
-
-    Parameters
-    ----------
-    arch: str
-        Target architecture.
-    """
-    current_cpu = environ.get('processor_architecture', '').lower()
-
-    def __init__(self, arch):
-        self.arch = arch.lower().replace('x64', 'amd64')
-
-    @property
-    def target_cpu(self):
-        """
-        Return Target CPU architecture.
-
-        Return
-        ------
-        str
-            Target CPU
-        """
-        return self.arch[self.arch.find('_') + 1:]
-
-    def target_is_x86(self):
-        """
-        Return True if target CPU is x86 32 bits..
-
-        Return
-        ------
-        bool
-            CPU is x86 32 bits
-        """
-        return self.target_cpu == 'x86'
-
-    def current_is_x86(self):
-        """
-        Return True if current CPU is x86 32 bits..
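# [Editorial aside, not part of the removed file.] A sketch of how the
# PlatformInfo helper above behaves, assuming the stock setuptools
# implementation shown in this hunk (Windows only):
#
#     from setuptools.msvc import PlatformInfo
#     pi = PlatformInfo('x86_amd64')   # x86 host building for amd64
#     pi.target_cpu                    # -> 'amd64'
#     pi.target_is_x86()               # -> False
#     pi.target_dir(x64=True)          # -> r'\x64'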
- - Return - ------ - bool - CPU is x86 32 bits - """ - return self.current_cpu == 'x86' - - def current_dir(self, hidex86=False, x64=False): - """ - Current platform specific subfolder. - - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. - x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - str - subfolder: '\target', or '' (see hidex86 parameter) - """ - return ( - '' if (self.current_cpu == 'x86' and hidex86) else - r'\x64' if (self.current_cpu == 'amd64' and x64) else - r'\%s' % self.current_cpu - ) - - def target_dir(self, hidex86=False, x64=False): - r""" - Target platform specific subfolder. - - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. - x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - str - subfolder: '\current', or '' (see hidex86 parameter) - """ - return ( - '' if (self.target_cpu == 'x86' and hidex86) else - r'\x64' if (self.target_cpu == 'amd64' and x64) else - r'\%s' % self.target_cpu - ) - - def cross_dir(self, forcex86=False): - r""" - Cross platform specific subfolder. - - Parameters - ---------- - forcex86: bool - Use 'x86' as current architecture even if current architecture is - not x86. - - Return - ------ - str - subfolder: '' if target architecture is current architecture, - '\current_target' if not. - """ - current = 'x86' if forcex86 else self.current_cpu - return ( - '' if self.target_cpu == current else - self.target_dir().replace('\\', '\\%s_' % current) - ) - - -class RegistryInfo: - """ - Microsoft Visual Studio related registry information. - - Parameters - ---------- - platform_info: PlatformInfo - "PlatformInfo" instance. - """ - HKEYS = (winreg.HKEY_USERS, - winreg.HKEY_CURRENT_USER, - winreg.HKEY_LOCAL_MACHINE, - winreg.HKEY_CLASSES_ROOT) - - def __init__(self, platform_info): - self.pi = platform_info - - @property - def visualstudio(self): - """ - Microsoft Visual Studio root registry key. - - Return - ------ - str - Registry key - """ - return 'VisualStudio' - - @property - def sxs(self): - """ - Microsoft Visual Studio SxS registry key. - - Return - ------ - str - Registry key - """ - return join(self.visualstudio, 'SxS') - - @property - def vc(self): - """ - Microsoft Visual C++ VC7 registry key. - - Return - ------ - str - Registry key - """ - return join(self.sxs, 'VC7') - - @property - def vs(self): - """ - Microsoft Visual Studio VS7 registry key. - - Return - ------ - str - Registry key - """ - return join(self.sxs, 'VS7') - - @property - def vc_for_python(self): - """ - Microsoft Visual C++ for Python registry key. - - Return - ------ - str - Registry key - """ - return r'DevDiv\VCForPython' - - @property - def microsoft_sdk(self): - """ - Microsoft SDK registry key. - - Return - ------ - str - Registry key - """ - return 'Microsoft SDKs' - - @property - def windows_sdk(self): - """ - Microsoft Windows/Platform SDK registry key. - - Return - ------ - str - Registry key - """ - return join(self.microsoft_sdk, 'Windows') - - @property - def netfx_sdk(self): - """ - Microsoft .NET Framework SDK registry key. - - Return - ------ - str - Registry key - """ - return join(self.microsoft_sdk, 'NETFXSDK') - - @property - def windows_kits_roots(self): - """ - Microsoft Windows Kits Roots registry key. - - Return - ------ - str - Registry key - """ - return r'Windows Kits\Installed Roots' - - def microsoft(self, key, x86=False): - """ - Return key in Microsoft software registry. 
- - Parameters - ---------- - key: str - Registry key path where look. - x86: str - Force x86 software registry. - - Return - ------ - str - Registry key - """ - node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node' - return join('Software', node64, 'Microsoft', key) - - def lookup(self, key, name): - """ - Look for values in registry in Microsoft software registry. - - Parameters - ---------- - key: str - Registry key path where look. - name: str - Value name to find. - - Return - ------ - str - value - """ - key_read = winreg.KEY_READ - openkey = winreg.OpenKey - closekey = winreg.CloseKey - ms = self.microsoft - for hkey in self.HKEYS: - bkey = None - try: - bkey = openkey(hkey, ms(key), 0, key_read) - except (OSError, IOError): - if not self.pi.current_is_x86(): - try: - bkey = openkey(hkey, ms(key, True), 0, key_read) - except (OSError, IOError): - continue - else: - continue - try: - return winreg.QueryValueEx(bkey, name)[0] - except (OSError, IOError): - pass - finally: - if bkey: - closekey(bkey) - - -class SystemInfo: - """ - Microsoft Windows and Visual Studio related system information. - - Parameters - ---------- - registry_info: RegistryInfo - "RegistryInfo" instance. - vc_ver: float - Required Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparison. - WinDir = environ.get('WinDir', '') - ProgramFiles = environ.get('ProgramFiles', '') - ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles) - - def __init__(self, registry_info, vc_ver=None): - self.ri = registry_info - self.pi = self.ri.pi - - self.known_vs_paths = self.find_programdata_vs_vers() - - # Except for VS15+, VC version is aligned with VS version - self.vs_ver = self.vc_ver = ( - vc_ver or self._find_latest_available_vs_ver()) - - def _find_latest_available_vs_ver(self): - """ - Find the latest VC version - - Return - ------ - float - version - """ - reg_vc_vers = self.find_reg_vs_vers() - - if not (reg_vc_vers or self.known_vs_paths): - raise distutils.errors.DistutilsPlatformError( - 'No Microsoft Visual C++ version found') - - vc_vers = set(reg_vc_vers) - vc_vers.update(self.known_vs_paths) - return sorted(vc_vers)[-1] - - def find_reg_vs_vers(self): - """ - Find Microsoft Visual Studio versions available in registry. - - Return - ------ - list of float - Versions - """ - ms = self.ri.microsoft - vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs) - vs_vers = [] - for hkey, key in itertools.product(self.ri.HKEYS, vckeys): - try: - bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ) - except (OSError, IOError): - continue - with bkey: - subkeys, values, _ = winreg.QueryInfoKey(bkey) - for i in range(values): - with contextlib.suppress(ValueError): - ver = float(winreg.EnumValue(bkey, i)[0]) - if ver not in vs_vers: - vs_vers.append(ver) - for i in range(subkeys): - with contextlib.suppress(ValueError): - ver = float(winreg.EnumKey(bkey, i)) - if ver not in vs_vers: - vs_vers.append(ver) - return sorted(vs_vers) - - def find_programdata_vs_vers(self): - r""" - Find Visual studio 2017+ versions from information in - "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances". - - Return - ------ - dict - float version as key, path as value. 
- """ - vs_versions = {} - instances_dir = \ - r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances' - - try: - hashed_names = listdir(instances_dir) - - except (OSError, IOError): - # Directory not exists with all Visual Studio versions - return vs_versions - - for name in hashed_names: - try: - # Get VS installation path from "state.json" file - state_path = join(instances_dir, name, 'state.json') - with open(state_path, 'rt', encoding='utf-8') as state_file: - state = json.load(state_file) - vs_path = state['installationPath'] - - # Raises OSError if this VS installation does not contain VC - listdir(join(vs_path, r'VC\Tools\MSVC')) - - # Store version and path - vs_versions[self._as_float_version( - state['installationVersion'])] = vs_path - - except (OSError, IOError, KeyError): - # Skip if "state.json" file is missing or bad format - continue - - return vs_versions - - @staticmethod - def _as_float_version(version): - """ - Return a string version as a simplified float version (major.minor) - - Parameters - ---------- - version: str - Version. - - Return - ------ - float - version - """ - return float('.'.join(version.split('.')[:2])) - - @property - def VSInstallDir(self): - """ - Microsoft Visual Studio directory. - - Return - ------ - str - path - """ - # Default path - default = join(self.ProgramFilesx86, - 'Microsoft Visual Studio %0.1f' % self.vs_ver) - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default - - @property - def VCInstallDir(self): - """ - Microsoft Visual C++ directory. - - Return - ------ - str - path - """ - path = self._guess_vc() or self._guess_vc_legacy() - - if not isdir(path): - msg = 'Microsoft Visual C++ directory not found' - raise distutils.errors.DistutilsPlatformError(msg) - - return path - - def _guess_vc(self): - """ - Locate Visual C++ for VS2017+. - - Return - ------ - str - path - """ - if self.vs_ver <= 14.0: - return '' - - try: - # First search in known VS paths - vs_dir = self.known_vs_paths[self.vs_ver] - except KeyError: - # Else, search with path from registry - vs_dir = self.VSInstallDir - - guess_vc = join(vs_dir, r'VC\Tools\MSVC') - - # Subdir with VC exact version as name - try: - # Update the VC version with real one instead of VS version - vc_ver = listdir(guess_vc)[-1] - self.vc_ver = self._as_float_version(vc_ver) - return join(guess_vc, vc_ver) - except (OSError, IOError, IndexError): - return '' - - def _guess_vc_legacy(self): - """ - Locate Visual C++ for versions prior to 2017. - - Return - ------ - str - path - """ - default = join(self.ProgramFilesx86, - r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver) - - # Try to get "VC++ for Python" path from registry as default path - reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver) - python_vc = self.ri.lookup(reg_path, 'installdir') - default_vc = join(python_vc, 'VC') if python_vc else default - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc - - @property - def WindowsSdkVersion(self): - """ - Microsoft Windows SDK versions for specified MSVC++ version. 
- - Return - ------ - tuple of str - versions - """ - if self.vs_ver <= 9.0: - return '7.0', '6.1', '6.0a' - elif self.vs_ver == 10.0: - return '7.1', '7.0a' - elif self.vs_ver == 11.0: - return '8.0', '8.0a' - elif self.vs_ver == 12.0: - return '8.1', '8.1a' - elif self.vs_ver >= 14.0: - return '10.0', '8.1' - - @property - def WindowsSdkLastVersion(self): - """ - Microsoft Windows SDK last version. - - Return - ------ - str - version - """ - return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib')) - - @property # noqa: C901 - def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME - """ - Microsoft Windows SDK directory. - - Return - ------ - str - path - """ - sdkdir = '' - for ver in self.WindowsSdkVersion: - # Try to get it from registry - loc = join(self.ri.windows_sdk, 'v%s' % ver) - sdkdir = self.ri.lookup(loc, 'installationfolder') - if sdkdir: - break - if not sdkdir or not isdir(sdkdir): - # Try to get "VC++ for Python" version from registry - path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) - install_base = self.ri.lookup(path, 'installdir') - if install_base: - sdkdir = join(install_base, 'WinSDK') - if not sdkdir or not isdir(sdkdir): - # If fail, use default new path - for ver in self.WindowsSdkVersion: - intver = ver[:ver.rfind('.')] - path = r'Microsoft SDKs\Windows Kits\%s' % intver - d = join(self.ProgramFiles, path) - if isdir(d): - sdkdir = d - if not sdkdir or not isdir(sdkdir): - # If fail, use default old path - for ver in self.WindowsSdkVersion: - path = r'Microsoft SDKs\Windows\v%s' % ver - d = join(self.ProgramFiles, path) - if isdir(d): - sdkdir = d - if not sdkdir: - # If fail, use Platform SDK - sdkdir = join(self.VCInstallDir, 'PlatformSDK') - return sdkdir - - @property - def WindowsSDKExecutablePath(self): - """ - Microsoft Windows SDK executable directory. - - Return - ------ - str - path - """ - # Find WinSDK NetFx Tools registry dir name - if self.vs_ver <= 11.0: - netfxver = 35 - arch = '' - else: - netfxver = 40 - hidex86 = True if self.vs_ver <= 12.0 else False - arch = self.pi.current_dir(x64=True, hidex86=hidex86) - fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) - - # list all possibles registry paths - regpaths = [] - if self.vs_ver >= 14.0: - for ver in self.NetFxSdkVersion: - regpaths += [join(self.ri.netfx_sdk, ver, fx)] - - for ver in self.WindowsSdkVersion: - regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)] - - # Return installation folder from the more recent path - for path in regpaths: - execpath = self.ri.lookup(path, 'installationfolder') - if execpath: - return execpath - - @property - def FSharpInstallDir(self): - """ - Microsoft Visual F# directory. - - Return - ------ - str - path - """ - path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver) - return self.ri.lookup(path, 'productdir') or '' - - @property - def UniversalCRTSdkDir(self): - """ - Microsoft Universal CRT SDK directory. - - Return - ------ - str - path - """ - # Set Kit Roots versions for specified MSVC++ version - vers = ('10', '81') if self.vs_ver >= 14.0 else () - - # Find path of the more recent Kit - for ver in vers: - sdkdir = self.ri.lookup(self.ri.windows_kits_roots, - 'kitsroot%s' % ver) - if sdkdir: - return sdkdir or '' - - @property - def UniversalCRTSdkLastVersion(self): - """ - Microsoft Universal C Runtime SDK last version. 
- - Return - ------ - str - version - """ - return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib')) - - @property - def NetFxSdkVersion(self): - """ - Microsoft .NET Framework SDK versions. - - Return - ------ - tuple of str - versions - """ - # Set FxSdk versions for specified VS version - return (('4.7.2', '4.7.1', '4.7', - '4.6.2', '4.6.1', '4.6', - '4.5.2', '4.5.1', '4.5') - if self.vs_ver >= 14.0 else ()) - - @property - def NetFxSdkDir(self): - """ - Microsoft .NET Framework SDK directory. - - Return - ------ - str - path - """ - sdkdir = '' - for ver in self.NetFxSdkVersion: - loc = join(self.ri.netfx_sdk, ver) - sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') - if sdkdir: - break - return sdkdir - - @property - def FrameworkDir32(self): - """ - Microsoft .NET Framework 32bit directory. - - Return - ------ - str - path - """ - # Default path - guess_fw = join(self.WinDir, r'Microsoft.NET\Framework') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw - - @property - def FrameworkDir64(self): - """ - Microsoft .NET Framework 64bit directory. - - Return - ------ - str - path - """ - # Default path - guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw - - @property - def FrameworkVersion32(self): - """ - Microsoft .NET Framework 32bit versions. - - Return - ------ - tuple of str - versions - """ - return self._find_dot_net_versions(32) - - @property - def FrameworkVersion64(self): - """ - Microsoft .NET Framework 64bit versions. - - Return - ------ - tuple of str - versions - """ - return self._find_dot_net_versions(64) - - def _find_dot_net_versions(self, bits): - """ - Find Microsoft .NET Framework versions. - - Parameters - ---------- - bits: int - Platform number of bits: 32 or 64. - - Return - ------ - tuple of str - versions - """ - # Find actual .NET version in registry - reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) - dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) - ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' - - # Set .NET versions for specified MSVC++ version - if self.vs_ver >= 12.0: - return ver, 'v4.0' - elif self.vs_ver >= 10.0: - return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5' - elif self.vs_ver == 9.0: - return 'v3.5', 'v2.0.50727' - elif self.vs_ver == 8.0: - return 'v3.0', 'v2.0.50727' - - @staticmethod - def _use_last_dir_name(path, prefix=''): - """ - Return name of the last dir in path or '' if no dir found. - - Parameters - ---------- - path: str - Use dirs in this path - prefix: str - Use only dirs starting by this prefix - - Return - ------ - str - name - """ - matching_dirs = ( - dir_name - for dir_name in reversed(listdir(path)) - if isdir(join(path, dir_name)) and - dir_name.startswith(prefix) - ) - return next(matching_dirs, None) or '' - - -class EnvironmentInfo: - """ - Return environment variables for specified Microsoft Visual C++ version - and platform : Lib, Include, Path and libpath. - - This function is compatible with Microsoft Visual C++ 9.0 to 14.X. - - Script created by analysing Microsoft environment configuration files like - "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... - - Parameters - ---------- - arch: str - Target architecture. - vc_ver: float - Required Microsoft Visual C++ version. If not set, autodetect the last - version. 
- vc_min_ver: float - Minimum Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparison. - - def __init__(self, arch, vc_ver=None, vc_min_ver=0): - self.pi = PlatformInfo(arch) - self.ri = RegistryInfo(self.pi) - self.si = SystemInfo(self.ri, vc_ver) - - if self.vc_ver < vc_min_ver: - err = 'No suitable Microsoft Visual C++ version found' - raise distutils.errors.DistutilsPlatformError(err) - - @property - def vs_ver(self): - """ - Microsoft Visual Studio. - - Return - ------ - float - version - """ - return self.si.vs_ver - - @property - def vc_ver(self): - """ - Microsoft Visual C++ version. - - Return - ------ - float - version - """ - return self.si.vc_ver - - @property - def VSTools(self): - """ - Microsoft Visual Studio Tools. - - Return - ------ - list of str - paths - """ - paths = [r'Common7\IDE', r'Common7\Tools'] - - if self.vs_ver >= 14.0: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] - paths += [r'Team Tools\Performance Tools'] - paths += [r'Team Tools\Performance Tools%s' % arch_subdir] - - return [join(self.si.VSInstallDir, path) for path in paths] - - @property - def VCIncludes(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Includes. - - Return - ------ - list of str - paths - """ - return [join(self.si.VCInstallDir, 'Include'), - join(self.si.VCInstallDir, r'ATLMFC\Include')] - - @property - def VCLibraries(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver >= 15.0: - arch_subdir = self.pi.target_dir(x64=True) - else: - arch_subdir = self.pi.target_dir(hidex86=True) - paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] - - if self.vs_ver >= 14.0: - paths += [r'Lib\store%s' % arch_subdir] - - return [join(self.si.VCInstallDir, path) for path in paths] - - @property - def VCStoreRefs(self): - """ - Microsoft Visual C++ store references Libraries. - - Return - ------ - list of str - paths - """ - if self.vs_ver < 14.0: - return [] - return [join(self.si.VCInstallDir, r'Lib\store\references')] - - @property - def VCTools(self): - """ - Microsoft Visual C++ Tools. - - Return - ------ - list of str - paths - """ - si = self.si - tools = [join(si.VCInstallDir, 'VCPackages')] - - forcex86 = True if self.vs_ver <= 10.0 else False - arch_subdir = self.pi.cross_dir(forcex86) - if arch_subdir: - tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)] - - if self.vs_ver == 14.0: - path = 'Bin%s' % self.pi.current_dir(hidex86=True) - tools += [join(si.VCInstallDir, path)] - - elif self.vs_ver >= 15.0: - host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else - r'bin\HostX64%s') - tools += [join( - si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))] - - if self.pi.current_cpu != self.pi.target_cpu: - tools += [join( - si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] - - else: - tools += [join(si.VCInstallDir, 'Bin')] - - return tools - - @property - def OSLibraries(self): - """ - Microsoft Windows SDK Libraries. 
- - Return - ------ - list of str - paths - """ - if self.vs_ver <= 10.0: - arch_subdir = self.pi.target_dir(hidex86=True, x64=True) - return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] - - else: - arch_subdir = self.pi.target_dir(x64=True) - lib = join(self.si.WindowsSdkDir, 'lib') - libver = self._sdk_subdir - return [join(lib, '%sum%s' % (libver, arch_subdir))] - - @property - def OSIncludes(self): - """ - Microsoft Windows SDK Include. - - Return - ------ - list of str - paths - """ - include = join(self.si.WindowsSdkDir, 'include') - - if self.vs_ver <= 10.0: - return [include, join(include, 'gl')] - - else: - if self.vs_ver >= 14.0: - sdkver = self._sdk_subdir - else: - sdkver = '' - return [join(include, '%sshared' % sdkver), - join(include, '%sum' % sdkver), - join(include, '%swinrt' % sdkver)] - - @property - def OSLibpath(self): - """ - Microsoft Windows SDK Libraries Paths. - - Return - ------ - list of str - paths - """ - ref = join(self.si.WindowsSdkDir, 'References') - libpath = [] - - if self.vs_ver <= 9.0: - libpath += self.OSLibraries - - if self.vs_ver >= 11.0: - libpath += [join(ref, r'CommonConfiguration\Neutral')] - - if self.vs_ver >= 14.0: - libpath += [ - ref, - join(self.si.WindowsSdkDir, 'UnionMetadata'), - join( - ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'), - join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'), - join( - ref, 'Windows.Networking.Connectivity.WwanContract', - '1.0.0.0'), - join( - self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', - '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration', - 'neutral'), - ] - return libpath - - @property - def SdkTools(self): - """ - Microsoft Windows SDK Tools. - - Return - ------ - list of str - paths - """ - return list(self._sdk_tools()) - - def _sdk_tools(self): - """ - Microsoft Windows SDK Tools paths generator. - - Return - ------ - generator of str - paths - """ - if self.vs_ver < 15.0: - bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86' - yield join(self.si.WindowsSdkDir, bin_dir) - - if not self.pi.current_is_x86(): - arch_subdir = self.pi.current_dir(x64=True) - path = 'Bin%s' % arch_subdir - yield join(self.si.WindowsSdkDir, path) - - if self.vs_ver in (10.0, 11.0): - if self.pi.target_is_x86(): - arch_subdir = '' - else: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir - yield join(self.si.WindowsSdkDir, path) - - elif self.vs_ver >= 15.0: - path = join(self.si.WindowsSdkDir, 'Bin') - arch_subdir = self.pi.current_dir(x64=True) - sdkver = self.si.WindowsSdkLastVersion - yield join(path, '%s%s' % (sdkver, arch_subdir)) - - if self.si.WindowsSDKExecutablePath: - yield self.si.WindowsSDKExecutablePath - - @property - def _sdk_subdir(self): - """ - Microsoft Windows SDK version subdir. - - Return - ------ - str - subdir - """ - ucrtver = self.si.WindowsSdkLastVersion - return ('%s\\' % ucrtver) if ucrtver else '' - - @property - def SdkSetup(self): - """ - Microsoft Windows SDK Setup. - - Return - ------ - list of str - paths - """ - if self.vs_ver > 9.0: - return [] - - return [join(self.si.WindowsSdkDir, 'Setup')] - - @property - def FxTools(self): - """ - Microsoft .NET Framework Tools. 
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        pi = self.pi
-        si = self.si
-
-        if self.vs_ver <= 10.0:
-            include32 = True
-            include64 = not pi.target_is_x86() and not pi.current_is_x86()
-        else:
-            include32 = pi.target_is_x86() or pi.current_is_x86()
-            include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
-
-        tools = []
-        if include32:
-            tools += [join(si.FrameworkDir32, ver)
-                      for ver in si.FrameworkVersion32]
-        if include64:
-            tools += [join(si.FrameworkDir64, ver)
-                      for ver in si.FrameworkVersion64]
-        return tools
-
-    @property
-    def NetFxSDKLibraries(self):
-        """
-        Microsoft .NET Framework SDK Libraries.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
-            return []
-
-        arch_subdir = self.pi.target_dir(x64=True)
-        return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
-
-    @property
-    def NetFxSDKIncludes(self):
-        """
-        Microsoft .NET Framework SDK Includes.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
-            return []
-
-        return [join(self.si.NetFxSdkDir, r'include\um')]
-
-    @property
-    def VsTDb(self):
-        """
-        Microsoft Visual Studio Team System Database.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
-
-    @property
-    def MSBuild(self):
-        """
-        Microsoft Build Engine.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        if self.vs_ver < 12.0:
-            return []
-        elif self.vs_ver < 15.0:
-            base_path = self.si.ProgramFilesx86
-            arch_subdir = self.pi.current_dir(hidex86=True)
-        else:
-            base_path = self.si.VSInstallDir
-            arch_subdir = ''
-
-        path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir)
-        build = [join(base_path, path)]
-
-        if self.vs_ver >= 15.0:
-            # Add Roslyn C# & Visual Basic Compiler
-            build += [join(base_path, path, 'Roslyn')]
-
-        return build
-
-    @property
-    def HTMLHelpWorkshop(self):
-        """
-        Microsoft HTML Help Workshop.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        if self.vs_ver < 11.0:
-            return []
-
-        return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
-
-    @property
-    def UCRTLibraries(self):
-        """
-        Microsoft Universal C Runtime SDK Libraries.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        if self.vs_ver < 14.0:
-            return []
-
-        arch_subdir = self.pi.target_dir(x64=True)
-        lib = join(self.si.UniversalCRTSdkDir, 'lib')
-        ucrtver = self._ucrt_subdir
-        return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
-
-    @property
-    def UCRTIncludes(self):
-        """
-        Microsoft Universal C Runtime SDK Include.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        if self.vs_ver < 14.0:
-            return []
-
-        include = join(self.si.UniversalCRTSdkDir, 'include')
-        return [join(include, '%sucrt' % self._ucrt_subdir)]
-
-    @property
-    def _ucrt_subdir(self):
-        """
-        Microsoft Universal C Runtime SDK version subdir.
-
-        Return
-        ------
-        str
-            subdir
-        """
-        ucrtver = self.si.UniversalCRTSdkLastVersion
-        return ('%s\\' % ucrtver) if ucrtver else ''
-
-    @property
-    def FSharp(self):
-        """
-        Microsoft Visual F#.
-
-        Return
-        ------
-        list of str
-            paths
-        """
-        # F# ships with VS 11.0 and 12.0 only; anything outside that
-        # range has no FSharpInstallDir. (The original comparison chain
-        # '11.0 > self.vs_ver > 12.0' could never be true.)
-        if not (11.0 <= self.vs_ver <= 12.0):
-            return []
-
-        return [self.si.FSharpInstallDir]
-
-    @property
-    def VCRuntimeRedist(self):
-        """
-        Microsoft Visual C++ runtime redistributable dll.
-
-        Return
-        ------
-        str
-            path
-        """
-        vcruntime = 'vcruntime%d0.dll' % self.vc_ver
-        arch_subdir = self.pi.target_dir(x64=True).strip('\\')
-
-        # Installation prefix candidates
-        prefixes = []
-        tools_path = self.si.VCInstallDir
-        redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist'))
-        if isdir(redist_path):
-            # Redist version may not be exactly the same as tools
-            redist_path = join(redist_path, listdir(redist_path)[-1])
-            prefixes += [redist_path, join(redist_path, 'onecore')]
-
-        prefixes += [join(tools_path, 'redist')]  # VS14 legacy path
-
-        # CRT directory
-        crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10),
-                    # Sometimes stored in a directory named after the VS
-                    # version instead of the VC version
-                    'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10))
-
-        # vcruntime path
-        for prefix, crt_dir in itertools.product(prefixes, crt_dirs):
-            path = join(prefix, arch_subdir, crt_dir, vcruntime)
-            if isfile(path):
-                return path
-
-    def return_env(self, exists=True):
-        """
-        Return environment dict.
-
-        Parameters
-        ----------
-        exists: bool
-            If True, only return existing paths.
-
-        Return
-        ------
-        dict
-            environment
-        """
-        env = dict(
-            include=self._build_paths('include',
-                                      [self.VCIncludes,
-                                       self.OSIncludes,
-                                       self.UCRTIncludes,
-                                       self.NetFxSDKIncludes],
-                                      exists),
-            lib=self._build_paths('lib',
-                                  [self.VCLibraries,
-                                   self.OSLibraries,
-                                   self.FxTools,
-                                   self.UCRTLibraries,
-                                   self.NetFxSDKLibraries],
-                                  exists),
-            libpath=self._build_paths('libpath',
-                                      [self.VCLibraries,
-                                       self.FxTools,
-                                       self.VCStoreRefs,
-                                       self.OSLibpath],
-                                      exists),
-            path=self._build_paths('path',
-                                   [self.VCTools,
-                                    self.VSTools,
-                                    self.VsTDb,
-                                    self.SdkTools,
-                                    self.SdkSetup,
-                                    self.FxTools,
-                                    self.MSBuild,
-                                    self.HTMLHelpWorkshop,
-                                    self.FSharp],
-                                   exists),
-        )
-        # VCRuntimeRedist is None when no redistributable dll was found;
-        # guard it before isfile() to avoid a TypeError.
-        if self.vs_ver >= 14 and self.VCRuntimeRedist \
-                and isfile(self.VCRuntimeRedist):
-            env['py_vcruntime_redist'] = self.VCRuntimeRedist
-        return env
-
-    def _build_paths(self, name, spec_path_lists, exists):
-        """
-        Given an environment variable name and specified paths,
-        return a pathsep-separated string of paths containing
-        unique, extant, directories from those paths and from
-        the environment variable. Raise an error if no paths
-        are resolved.
-
-        Parameters
-        ----------
-        name: str
-            Environment variable name
-        spec_path_lists: list of str
-            Paths
-        exists: bool
-            If True, only return existing paths.
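A self-contained equivalent of the path merging that _build_paths performs, with unique_everseen (normally imported from more_itertools) inlined as an order-preserving filter:

import itertools
from os import environ, pathsep
from os.path import isdir

def build_paths(name, spec_path_lists, exists=True):
    # Chain the computed path lists, append whatever the environment
    # variable already holds, optionally drop non-directories, then
    # deduplicate while preserving first-seen order.
    spec_paths = itertools.chain.from_iterable(spec_path_lists)
    env_paths = environ.get(name, '').split(pathsep)
    paths = itertools.chain(spec_paths, env_paths)
    extant = filter(isdir, paths) if exists else paths
    seen = set()
    unique = [p for p in extant if not (p in seen or seen.add(p))]
    if not unique:
        raise RuntimeError('%s environment variable is empty' % name.upper())
    return pathsep.join(unique)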
- - Return - ------ - str - Pathsep-separated paths - """ - # flatten spec_path_lists - spec_paths = itertools.chain.from_iterable(spec_path_lists) - env_paths = environ.get(name, '').split(pathsep) - paths = itertools.chain(spec_paths, env_paths) - extant_paths = list(filter(isdir, paths)) if exists else paths - if not extant_paths: - msg = "%s environment variable is empty" % name.upper() - raise distutils.errors.DistutilsPlatformError(msg) - unique_paths = unique_everseen(extant_paths) - return pathsep.join(unique_paths) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/namespaces.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/namespaces.py deleted file mode 100755 index 44939e1..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/namespaces.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from distutils import log -import itertools - - -flatten = itertools.chain.from_iterable - - -class Installer: - - nspkg_ext = '-nspkg.pth' - - def install_namespaces(self): - nsp = self._get_all_ns_packages() - if not nsp: - return - filename, ext = os.path.splitext(self._get_target()) - filename += self.nspkg_ext - self.outputs.append(filename) - log.info("Installing %s", filename) - lines = map(self._gen_nspkg_line, nsp) - - if self.dry_run: - # always generate the lines, even in dry run - list(lines) - return - - with open(filename, 'wt') as f: - f.writelines(lines) - - def uninstall_namespaces(self): - filename, ext = os.path.splitext(self._get_target()) - filename += self.nspkg_ext - if not os.path.exists(filename): - return - log.info("Removing %s", filename) - os.remove(filename) - - def _get_target(self): - return self.target - - _nspkg_tmpl = ( - "import sys, types, os", - "has_mfs = sys.version_info > (3, 5)", - "p = os.path.join(%(root)s, *%(pth)r)", - "importlib = has_mfs and __import__('importlib.util')", - "has_mfs and __import__('importlib.machinery')", - ( - "m = has_mfs and " - "sys.modules.setdefault(%(pkg)r, " - "importlib.util.module_from_spec(" - "importlib.machinery.PathFinder.find_spec(%(pkg)r, " - "[os.path.dirname(p)])))" - ), - ( - "m = m or " - "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))" - ), - "mp = (m or []) and m.__dict__.setdefault('__path__',[])", - "(p not in mp) and mp.append(p)", - ) - "lines for the namespace installer" - - _nspkg_tmpl_multi = ( - 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', - ) - "additional line(s) when a parent package is indicated" - - def _get_root(self): - return "sys._getframe(1).f_locals['sitedir']" - - def _gen_nspkg_line(self, pkg): - pth = tuple(pkg.split('.')) - root = self._get_root() - tmpl_lines = self._nspkg_tmpl - parent, sep, child = pkg.rpartition('.') - if parent: - tmpl_lines += self._nspkg_tmpl_multi - return ';'.join(tmpl_lines) % locals() + '\n' - - def _get_all_ns_packages(self): - """Return sorted list of all package namespaces""" - pkgs = self.distribution.namespace_packages or [] - return sorted(flatten(map(self._pkg_names, pkgs))) - - @staticmethod - def _pkg_names(pkg): - """ - Given a namespace package, yield the components of that - package. 
-
-        >>> names = Installer._pkg_names('a.b.c')
-        >>> set(names) == set(['a', 'a.b', 'a.b.c'])
-        True
-        """
-        parts = pkg.split('.')
-        while parts:
-            yield '.'.join(parts)
-            parts.pop()
-
-
-class DevelopInstaller(Installer):
-    def _get_root(self):
-        return repr(str(self.egg_path))
-
-    def _get_target(self):
-        return self.egg_link
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/package_index.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/package_index.py
deleted file mode 100755
index 051e523..0000000
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/package_index.py
+++ /dev/null
@@ -1,1127 +0,0 @@
-"""PyPI and direct package downloading"""
-import sys
-import os
-import re
-import io
-import shutil
-import socket
-import base64
-import hashlib
-import itertools
-import warnings
-import configparser
-import html
-import http.client
-import urllib.parse
-import urllib.request
-import urllib.error
-from functools import wraps
-
-import setuptools
-from pkg_resources import (
-    CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
-    Environment, find_distributions, safe_name, safe_version,
-    to_filename, Requirement, DEVELOP_DIST, EGG_DIST, parse_version,
-)
-from distutils import log
-from distutils.errors import DistutilsError
-from fnmatch import translate
-from setuptools.wheel import Wheel
-from setuptools.extern.more_itertools import unique_everseen
-
-
-EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
-HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
-PYPI_MD5 = re.compile(
-    r'<a href="([^"#]+)#md5=([0-9a-f]{32})">([^<]+)</a>\n\s+\(md5\)'
-)
-URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
-EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
-
-__all__ = [
-    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
-    'interpret_distro_name',
-]
-
-_SOCKET_TIMEOUT = 15
-
-_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
-user_agent = _tmpl.format(
-    py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools)
-
-
-def parse_requirement_arg(spec):
-    try:
-        return Requirement.parse(spec)
-    except ValueError as e:
-        raise DistutilsError(
-            "Not a URL, existing file, or requirement spec: %r" % (spec,)
-        ) from e
-
-
-def parse_bdist_wininst(name):
-    """Return (base, py_ver, plat) or (None, None, None) for a possible .exe name"""
-
-    lower = name.lower()
-    base, py_ver, plat = None, None, None
-
-    if lower.endswith('.exe'):
-        if lower.endswith('.win32.exe'):
-            base = name[:-10]
-            plat = 'win32'
-        elif lower.startswith('.win32-py', -16):
-            py_ver = name[-7:-4]
-            base = name[:-16]
-            plat = 'win32'
-        elif lower.endswith('.win-amd64.exe'):
-            base = name[:-14]
-            plat = 'win-amd64'
-        elif lower.startswith('.win-amd64-py', -20):
-            py_ver = name[-7:-4]
-            base = name[:-20]
-            plat = 'win-amd64'
-    return base, py_ver, plat
-
-
-def egg_info_for_url(url):
-    parts = urllib.parse.urlparse(url)
-    scheme, server, path, parameters, query, fragment = parts
-    base = urllib.parse.unquote(path.split('/')[-1])
-    if server == 'sourceforge.net' and base == 'download':  # XXX Yuck
-        base = urllib.parse.unquote(path.split('/')[-2])
-    if '#' in base:
-        base, fragment = base.split('#', 1)
-    return base, fragment
-
-
-def distros_for_url(url, metadata=None):
-    """Yield egg or source distribution objects that might be found at a URL"""
-    base, fragment = egg_info_for_url(url)
-    for dist in distros_for_location(url, base, metadata):
-        yield dist
-    if fragment:
-        match = EGG_FRAGMENT.match(fragment)
-
if match: - for dist in interpret_distro_name( - url, match.group(1), metadata, precedence=CHECKOUT_DIST - ): - yield dist - - -def distros_for_location(location, basename, metadata=None): - """Yield egg or source distribution objects based on basename""" - if basename.endswith('.egg.zip'): - basename = basename[:-4] # strip the .zip - if basename.endswith('.egg') and '-' in basename: - # only one, unambiguous interpretation - return [Distribution.from_location(location, basename, metadata)] - if basename.endswith('.whl') and '-' in basename: - wheel = Wheel(basename) - if not wheel.is_compatible(): - return [] - return [Distribution( - location=location, - project_name=wheel.project_name, - version=wheel.version, - # Increase priority over eggs. - precedence=EGG_DIST + 1, - )] - if basename.endswith('.exe'): - win_base, py_ver, platform = parse_bdist_wininst(basename) - if win_base is not None: - return interpret_distro_name( - location, win_base, metadata, py_ver, BINARY_DIST, platform - ) - # Try source distro extensions (.zip, .tgz, etc.) - # - for ext in EXTENSIONS: - if basename.endswith(ext): - basename = basename[:-len(ext)] - return interpret_distro_name(location, basename, metadata) - return [] # no extension matched - - -def distros_for_filename(filename, metadata=None): - """Yield possible egg or source distribution objects based on a filename""" - return distros_for_location( - normalize_path(filename), os.path.basename(filename), metadata - ) - - -def interpret_distro_name( - location, basename, metadata, py_version=None, precedence=SOURCE_DIST, - platform=None -): - """Generate alternative interpretations of a source distro name - - Note: if `location` is a filesystem filename, you should call - ``pkg_resources.normalize_path()`` on it before passing it to this - routine! - """ - # Generate alternative interpretations of a source distro name - # Because some packages are ambiguous as to name/versions split - # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. - # So, we generate each possible interpretation (e.g. "adns, python-1.1.0" - # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice, - # the spurious interpretations should be ignored, because in the event - # there's also an "adns" package, the spurious "python-1.1.0" version will - # compare lower than any numeric version number, and is therefore unlikely - # to match a request for it. It's still a potential problem, though, and - # in the long run PyPI and the distutils should go for "safe" names and - # versions in distribution archive names (sdist and bdist). - - parts = basename.split('-') - if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]): - # it is a bdist_dumb, not an sdist -- bail out - return - - for p in range(1, len(parts) + 1): - yield Distribution( - location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), - py_version=py_version, precedence=precedence, - platform=platform - ) - - -def unique_values(func): - """ - Wrap a function returning an iterable such that the resulting iterable - only ever yields unique items. 
-    """
-
-    @wraps(func)
-    def wrapper(*args, **kwargs):
-        return unique_everseen(func(*args, **kwargs))
-
-    return wrapper
-
-
-REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
-# this line is here to fix emacs' cruddy broken syntax highlighting
-
-
-@unique_values
-def find_external_links(url, page):
-    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
-
-    for match in REL.finditer(page):
-        tag, rel = match.groups()
-        rels = set(map(str.strip, rel.lower().split(',')))
-        if 'homepage' in rels or 'download' in rels:
-            for match in HREF.finditer(tag):
-                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
-    for tag in ("<th>Home Page", "<th>Download URL"):
-        pos = page.find(tag)
-        if pos != -1:
-            match = HREF.search(page, pos)
-            if match:
-                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
-
-
-class ContentChecker:
-    """
-    A null content checker that defines the interface for checking content
-    """
-
-    def feed(self, block):
-        """
-        Feed a block of data to the hash.
-        """
-        return
-
-    def is_valid(self):
-        """
-        Check the hash. Return False if validation fails.
-        """
-        return True
-
-    def report(self, reporter, template):
-        """
-        Call reporter with information about the checker (hash name)
-        substituted into the template.
-        """
-        return
-
-
-class HashChecker(ContentChecker):
-    pattern = re.compile(
-        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
-        r'(?P<expected>[a-f0-9]+)'
-    )
-
-    def __init__(self, hash_name, expected):
-        self.hash_name = hash_name
-        self.hash = hashlib.new(hash_name)
-        self.expected = expected
-
-    @classmethod
-    def from_url(cls, url):
-        "Construct a (possibly null) ContentChecker from a URL"
-        fragment = urllib.parse.urlparse(url)[-1]
-        if not fragment:
-            return ContentChecker()
-        match = cls.pattern.search(fragment)
-        if not match:
-            return ContentChecker()
-        return cls(**match.groupdict())
-
-    def feed(self, block):
-        self.hash.update(block)
-
-    def is_valid(self):
-        return self.hash.hexdigest() == self.expected
-
-    def report(self, reporter, template):
-        msg = template % self.hash_name
-        return reporter(msg)
-
-
-class PackageIndex(Environment):
-    """A distribution index that scans web pages for download URLs"""
-
-    def __init__(
-            self, index_url="https://pypi.org/simple/", hosts=('*',),
-            ca_bundle=None, verify_ssl=True, *args, **kw
-    ):
-        super().__init__(*args, **kw)
-        self.index_url = index_url + "/" [:not index_url.endswith('/')]
-        self.scanned_urls = {}
-        self.fetched_urls = {}
-        self.package_pages = {}
-        self.allows = re.compile('|'.join(map(translate, hosts))).match
-        self.to_scan = []
-        self.opener = urllib.request.urlopen
-
-    def add(self, dist):
-        # ignore invalid versions
-        try:
-            parse_version(dist.version)
-        except Exception:
-            return
-        return super().add(dist)
-
-    # FIXME: 'PackageIndex.process_url' is too complex (14)
-    def process_url(self, url, retrieve=False):  # noqa: C901
-        """Evaluate a URL as a possible download, and maybe retrieve it"""
-        if url in self.scanned_urls and not retrieve:
-            return
-        self.scanned_urls[url] = True
-        if not URL_SCHEME(url):
-            self.process_filename(url)
-            return
-        else:
-            dists = list(distros_for_url(url))
-            if dists:
-                if not self.url_ok(url):
-                    return
-                self.debug("Found link: %s", url)
-
-        if dists or not retrieve or url in self.fetched_urls:
-            list(map(self.add, dists))
-            return  # don't need the actual page
-
-        if not self.url_ok(url):
-            self.fetched_urls[url] = True
-            return
-
-        self.info("Reading %s", url)
-        self.fetched_urls[url] = True  # prevent multiple fetch
attempts - tmpl = "Download error on %s: %%s -- Some packages may not be found!" - f = self.open_url(url, tmpl % url) - if f is None: - return - if isinstance(f, urllib.error.HTTPError) and f.code == 401: - self.info("Authentication error: %s" % f.msg) - self.fetched_urls[f.url] = True - if 'html' not in f.headers.get('content-type', '').lower(): - f.close() # not html, we can't process it - return - - base = f.url # handle redirects - page = f.read() - if not isinstance(page, str): - # In Python 3 and got bytes but want str. - if isinstance(f, urllib.error.HTTPError): - # Errors have no charset, assume latin1: - charset = 'latin-1' - else: - charset = f.headers.get_param('charset') or 'latin-1' - page = page.decode(charset, "ignore") - f.close() - for match in HREF.finditer(page): - link = urllib.parse.urljoin(base, htmldecode(match.group(1))) - self.process_url(link) - if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: - page = self.process_index(url, page) - - def process_filename(self, fn, nested=False): - # process filenames or directories - if not os.path.exists(fn): - self.warn("Not found: %s", fn) - return - - if os.path.isdir(fn) and not nested: - path = os.path.realpath(fn) - for item in os.listdir(path): - self.process_filename(os.path.join(path, item), True) - - dists = distros_for_filename(fn) - if dists: - self.debug("Found: %s", fn) - list(map(self.add, dists)) - - def url_ok(self, url, fatal=False): - s = URL_SCHEME(url) - is_file = s and s.group(1).lower() == 'file' - if is_file or self.allows(urllib.parse.urlparse(url)[1]): - return True - msg = ( - "\nNote: Bypassing %s (disallowed host; see " - "http://bit.ly/2hrImnY for details).\n") - if fatal: - raise DistutilsError(msg % url) - else: - self.warn(msg, url) - - def scan_egg_links(self, search_path): - dirs = filter(os.path.isdir, search_path) - egg_links = ( - (path, entry) - for path in dirs - for entry in os.listdir(path) - if entry.endswith('.egg-link') - ) - list(itertools.starmap(self.scan_egg_link, egg_links)) - - def scan_egg_link(self, path, entry): - with open(os.path.join(path, entry)) as raw_lines: - # filter non-empty lines - lines = list(filter(None, map(str.strip, raw_lines))) - - if len(lines) != 2: - # format is not recognized; punt - return - - egg_path, setup_path = lines - - for dist in find_distributions(os.path.join(path, egg_path)): - dist.location = os.path.join(path, *lines) - dist.precedence = SOURCE_DIST - self.add(dist) - - def _scan(self, link): - # Process a URL to see if it's for a package page - NO_MATCH_SENTINEL = None, None - if not link.startswith(self.index_url): - return NO_MATCH_SENTINEL - - parts = list(map( - urllib.parse.unquote, link[len(self.index_url):].split('/') - )) - if len(parts) != 2 or '#' in parts[1]: - return NO_MATCH_SENTINEL - - # it's a package page, sanitize and index it - pkg = safe_name(parts[0]) - ver = safe_version(parts[1]) - self.package_pages.setdefault(pkg.lower(), {})[link] = True - return to_filename(pkg), to_filename(ver) - - def process_index(self, url, page): - """Process the contents of a PyPI page""" - - # process an index page into the package-page index - for match in HREF.finditer(page): - try: - self._scan(urllib.parse.urljoin(url, htmldecode(match.group(1)))) - except ValueError: - pass - - pkg, ver = self._scan(url) # ensure this page is in the page index - if not pkg: - return "" # no sense double-scanning non-package pages - - # process individual package page - for new_url in find_external_links(url, page): - # Process 
the found URL
-            base, frag = egg_info_for_url(new_url)
-            if base.endswith('.py') and not frag:
-                if ver:
-                    new_url += '#egg=%s-%s' % (pkg, ver)
-                else:
-                    self.need_version_info(url)
-            self.scan_url(new_url)
-
-        return PYPI_MD5.sub(
-            lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
-        )
-
-    def need_version_info(self, url):
-        self.scan_all(
-            "Page at %s links to .py file(s) without version info; an index "
-            "scan is required.", url
-        )
-
-    def scan_all(self, msg=None, *args):
-        if self.index_url not in self.fetched_urls:
-            if msg:
-                self.warn(msg, *args)
-            self.info(
-                "Scanning index of all packages (this may take a while)"
-            )
-        self.scan_url(self.index_url)
-
-    def find_packages(self, requirement):
-        self.scan_url(self.index_url + requirement.unsafe_name + '/')
-
-        if not self.package_pages.get(requirement.key):
-            # Fall back to safe version of the name
-            self.scan_url(self.index_url + requirement.project_name + '/')
-
-        if not self.package_pages.get(requirement.key):
-            # We couldn't find the target package, so search the index page too
-            self.not_found_in_index(requirement)
-
-        for url in list(self.package_pages.get(requirement.key, ())):
-            # scan each page that might be related to the desired package
-            self.scan_url(url)
-
-    def obtain(self, requirement, installer=None):
-        self.prescan()
-        self.find_packages(requirement)
-        for dist in self[requirement.key]:
-            if dist in requirement:
-                return dist
-            self.debug("%s does not match %s", requirement, dist)
-        return super(PackageIndex, self).obtain(requirement, installer)
-
-    def check_hash(self, checker, filename, tfp):
-        """
-        checker is a ContentChecker
-        """
-        checker.report(
-            self.debug,
-            "Validating %%s checksum for %s" % filename)
-        if not checker.is_valid():
-            tfp.close()
-            os.unlink(filename)
-            raise DistutilsError(
-                "%s validation failed for %s; "
-                "possible download problem?"
-                % (checker.hash.name, os.path.basename(filename))
-            )
-
-    def add_find_links(self, urls):
-        """Add `urls` to the list that will be prescanned for searches"""
-        for url in urls:
-            if (
-                self.to_scan is None  # if we have already "gone online"
-                or not URL_SCHEME(url)  # or it's a local file/directory
-                or url.startswith('file:')
-                or list(distros_for_url(url))  # or a direct package link
-            ):
-                # then go ahead and process it now
-                self.scan_url(url)
-            else:
-                # otherwise, defer retrieval till later
-                self.to_scan.append(url)
-
-    def prescan(self):
-        """Scan urls scheduled for prescanning (e.g. --find-links)"""
-        if self.to_scan:
-            list(map(self.scan_url, self.to_scan))
-        self.to_scan = None  # from now on, go ahead and process immediately
-
-    def not_found_in_index(self, requirement):
-        if self[requirement.key]:  # we've seen at least one distro
-            meth, msg = self.info, "Couldn't retrieve index page for %r"
-        else:  # no distros seen for this name, might be misspelled
-            meth, msg = (
-                self.warn,
-                "Couldn't find index page for %r (maybe misspelled?)")
-        meth(msg, requirement.unsafe_name)
-        self.scan_all()
-
-    def download(self, spec, tmpdir):
-        """Locate and/or download `spec` to `tmpdir`, returning a local path
-
-        `spec` may be a ``Requirement`` object, or a string containing a URL,
-        an existing local filename, or a project/version requirement spec
-        (i.e. the string form of a ``Requirement`` object). If it is the URL
-        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
-        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
-        automatically created alongside the downloaded file.
- - If `spec` is a ``Requirement`` object or a string containing a - project/version requirement spec, this method returns the location of - a matching distribution (possibly after downloading it to `tmpdir`). - If `spec` is a locally existing file or directory name, it is simply - returned unchanged. If `spec` is a URL, it is downloaded to a subpath - of `tmpdir`, and the local filename is returned. Various errors may be - raised if a problem occurs during downloading. - """ - if not isinstance(spec, Requirement): - scheme = URL_SCHEME(spec) - if scheme: - # It's a url, download it to tmpdir - found = self._download_url(scheme.group(1), spec, tmpdir) - base, fragment = egg_info_for_url(spec) - if base.endswith('.py'): - found = self.gen_setup(found, fragment, tmpdir) - return found - elif os.path.exists(spec): - # Existing file or directory, just return it - return spec - else: - spec = parse_requirement_arg(spec) - return getattr(self.fetch_distribution(spec, tmpdir), 'location', None) - - def fetch_distribution( # noqa: C901 # is too complex (14) # FIXME - self, requirement, tmpdir, force_scan=False, source=False, - develop_ok=False, local_index=None): - """Obtain a distribution suitable for fulfilling `requirement` - - `requirement` must be a ``pkg_resources.Requirement`` instance. - If necessary, or if the `force_scan` flag is set, the requirement is - searched for in the (online) package index as well as the locally - installed packages. If a distribution matching `requirement` is found, - the returned distribution's ``location`` is the value you would have - gotten from calling the ``download()`` method with the matching - distribution's URL or filename. If no matching distribution is found, - ``None`` is returned. - - If the `source` flag is set, only source distributions and source - checkout links will be considered. Unless the `develop_ok` flag is - set, development and system eggs (i.e., those using the ``.egg-info`` - format) will be ignored. - """ - # process a Requirement - self.info("Searching for %s", requirement) - skipped = {} - dist = None - - def find(req, env=None): - if env is None: - env = self - # Find a matching distribution; may be called more than once - - for dist in env[req.key]: - - if dist.precedence == DEVELOP_DIST and not develop_ok: - if dist not in skipped: - self.warn( - "Skipping development or system egg: %s", dist, - ) - skipped[dist] = 1 - continue - - test = ( - dist in req - and (dist.precedence <= SOURCE_DIST or not source) - ) - if test: - loc = self.download(dist.location, tmpdir) - dist.download_location = loc - if os.path.exists(dist.download_location): - return dist - - if force_scan: - self.prescan() - self.find_packages(requirement) - dist = find(requirement) - - if not dist and local_index is not None: - dist = find(requirement, local_index) - - if dist is None: - if self.to_scan is not None: - self.prescan() - dist = find(requirement) - - if dist is None and not force_scan: - self.find_packages(requirement) - dist = find(requirement) - - if dist is None: - self.warn( - "No local packages or working download links found for %s%s", - (source and "a source distribution of " or ""), - requirement, - ) - else: - self.info("Best match: %s", dist) - return dist.clone(location=dist.download_location) - - def fetch(self, requirement, tmpdir, force_scan=False, source=False): - """Obtain a file suitable for fulfilling `requirement` - - DEPRECATED; use the ``fetch_distribution()`` method now instead. 
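The docstrings here describe a three-way dispatch on `spec`: URL, existing local path, or requirement string. A compact sketch of that classification step (classify_spec is a hypothetical helper, not part of the module):

import os
import re

URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match

def classify_spec(spec):
    # Mirrors the dispatch at the top of download() above: URL first,
    # then existing local file or directory, else a requirement spec.
    scheme = URL_SCHEME(spec)
    if scheme:
        return 'url', scheme.group(1)
    if os.path.exists(spec):
        return 'local', spec
    return 'requirement', spec

assert classify_spec('https://example.invalid/pkg-1.0.tar.gz') == ('url', 'https')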
For - backward compatibility, this routine is identical but returns the - ``location`` of the downloaded distribution instead of a distribution - object. - """ - dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) - if dist is not None: - return dist.location - return None - - def gen_setup(self, filename, fragment, tmpdir): - match = EGG_FRAGMENT.match(fragment) - dists = match and [ - d for d in - interpret_distro_name(filename, match.group(1), None) if d.version - ] or [] - - if len(dists) == 1: # unambiguous ``#egg`` fragment - basename = os.path.basename(filename) - - # Make sure the file has been downloaded to the temp dir. - if os.path.dirname(filename) != tmpdir: - dst = os.path.join(tmpdir, basename) - from setuptools.command.easy_install import samefile - if not samefile(filename, dst): - shutil.copy2(filename, dst) - filename = dst - - with open(os.path.join(tmpdir, 'setup.py'), 'w') as file: - file.write( - "from setuptools import setup\n" - "setup(name=%r, version=%r, py_modules=[%r])\n" - % ( - dists[0].project_name, dists[0].version, - os.path.splitext(basename)[0] - ) - ) - return filename - - elif match: - raise DistutilsError( - "Can't unambiguously interpret project/version identifier %r; " - "any dashes in the name or version should be escaped using " - "underscores. %r" % (fragment, dists) - ) - else: - raise DistutilsError( - "Can't process plain .py files without an '#egg=name-version'" - " suffix to enable automatic setup script generation." - ) - - dl_blocksize = 8192 - - def _download_to(self, url, filename): - self.info("Downloading %s", url) - # Download the file - fp = None - try: - checker = HashChecker.from_url(url) - fp = self.open_url(url) - if isinstance(fp, urllib.error.HTTPError): - raise DistutilsError( - "Can't download %s: %s %s" % (url, fp.code, fp.msg) - ) - headers = fp.info() - blocknum = 0 - bs = self.dl_blocksize - size = -1 - if "content-length" in headers: - # Some servers return multiple Content-Length headers :( - sizes = headers.get_all('Content-Length') - size = max(map(int, sizes)) - self.reporthook(url, filename, blocknum, bs, size) - with open(filename, 'wb') as tfp: - while True: - block = fp.read(bs) - if block: - checker.feed(block) - tfp.write(block) - blocknum += 1 - self.reporthook(url, filename, blocknum, bs, size) - else: - break - self.check_hash(checker, filename, tfp) - return headers - finally: - if fp: - fp.close() - - def reporthook(self, url, filename, blocknum, blksize, size): - pass # no-op - - # FIXME: - def open_url(self, url, warning=None): # noqa: C901 # is too complex (12) - if url.startswith('file:'): - return local_open(url) - try: - return open_with_auth(url, self.opener) - except (ValueError, http.client.InvalidURL) as v: - msg = ' '.join([str(arg) for arg in v.args]) - if warning: - self.warn(warning, msg) - else: - raise DistutilsError('%s %s' % (url, msg)) from v - except urllib.error.HTTPError as v: - return v - except urllib.error.URLError as v: - if warning: - self.warn(warning, v.reason) - else: - raise DistutilsError("Download error for %s: %s" - % (url, v.reason)) from v - except http.client.BadStatusLine as v: - if warning: - self.warn(warning, v.line) - else: - raise DistutilsError( - '%s returned a bad status line. 
The server might be ' - 'down, %s' % - (url, v.line) - ) from v - except (http.client.HTTPException, socket.error) as v: - if warning: - self.warn(warning, v) - else: - raise DistutilsError("Download error for %s: %s" - % (url, v)) from v - - def _download_url(self, scheme, url, tmpdir): - # Determine download filename - # - name, fragment = egg_info_for_url(url) - if name: - while '..' in name: - name = name.replace('..', '.').replace('\\', '_') - else: - name = "__downloaded__" # default if URL has no path contents - - if name.endswith('.egg.zip'): - name = name[:-4] # strip the extra .zip before download - - filename = os.path.join(tmpdir, name) - - # Download the file - # - if scheme == 'svn' or scheme.startswith('svn+'): - return self._download_svn(url, filename) - elif scheme == 'git' or scheme.startswith('git+'): - return self._download_git(url, filename) - elif scheme.startswith('hg+'): - return self._download_hg(url, filename) - elif scheme == 'file': - return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) - else: - self.url_ok(url, True) # raises error if not allowed - return self._attempt_download(url, filename) - - def scan_url(self, url): - self.process_url(url, True) - - def _attempt_download(self, url, filename): - headers = self._download_to(url, filename) - if 'html' in headers.get('content-type', '').lower(): - return self._download_html(url, headers, filename) - else: - return filename - - def _download_html(self, url, headers, filename): - file = open(filename) - for line in file: - if line.strip(): - # Check for a subversion index page - if re.search(r'([^- ]+ - )?Revision \d+:', line): - # it's a subversion index page: - file.close() - os.unlink(filename) - return self._download_svn(url, filename) - break # not an index page - file.close() - os.unlink(filename) - raise DistutilsError("Unexpected HTML page found at " + url) - - def _download_svn(self, url, filename): - warnings.warn("SVN download support is deprecated", UserWarning) - url = url.split('#', 1)[0] # remove any fragment for svn's sake - creds = '' - if url.lower().startswith('svn:') and '@' in url: - scheme, netloc, path, p, q, f = urllib.parse.urlparse(url) - if not netloc and path.startswith('//') and '/' in path[2:]: - netloc, path = path[2:].split('/', 1) - auth, host = _splituser(netloc) - if auth: - if ':' in auth: - user, pw = auth.split(':', 1) - creds = " --username=%s --password=%s" % (user, pw) - else: - creds = " --username=" + auth - netloc = host - parts = scheme, netloc, url, p, q, f - url = urllib.parse.urlunparse(parts) - self.info("Doing subversion checkout from %s to %s", url, filename) - os.system("svn checkout%s -q %s %s" % (creds, url, filename)) - return filename - - @staticmethod - def _vcs_split_rev_from_url(url, pop_prefix=False): - scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) - - scheme = scheme.split('+', 1)[-1] - - # Some fragment identification fails - path = path.split('#', 1)[0] - - rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) - - # Also, discard fragment - url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) - - return url, rev - - def _download_git(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing git clone from %s to %s", url, filename) - os.system("git clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Checking out %s", rev) - os.system("git -C %s checkout --quiet %s" % ( - filename, - rev, - 
)) - - return filename - - def _download_hg(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing hg clone from %s to %s", url, filename) - os.system("hg clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Updating to %s", rev) - os.system("hg --cwd %s up -C -r %s -q" % ( - filename, - rev, - )) - - return filename - - def debug(self, msg, *args): - log.debug(msg, *args) - - def info(self, msg, *args): - log.info(msg, *args) - - def warn(self, msg, *args): - log.warn(msg, *args) - - -# This pattern matches a character entity reference (a decimal numeric -# references, a hexadecimal numeric reference, or a named reference). -entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub - - -def decode_entity(match): - what = match.group(0) - return html.unescape(what) - - -def htmldecode(text): - """ - Decode HTML entities in the given text. - - >>> htmldecode( - ... 'https://../package_name-0.1.2.tar.gz' - ... '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz') - 'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz' - """ - return entity_sub(decode_entity, text) - - -def socket_timeout(timeout=15): - def _socket_timeout(func): - def _socket_timeout(*args, **kwargs): - old_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - try: - return func(*args, **kwargs) - finally: - socket.setdefaulttimeout(old_timeout) - - return _socket_timeout - - return _socket_timeout - - -def _encode_auth(auth): - """ - Encode auth from a URL suitable for an HTTP header. - >>> str(_encode_auth('username%3Apassword')) - 'dXNlcm5hbWU6cGFzc3dvcmQ=' - - Long auth strings should not cause a newline to be inserted. - >>> long_auth = 'username:' + 'password'*10 - >>> chr(10) in str(_encode_auth(long_auth)) - False - """ - auth_s = urllib.parse.unquote(auth) - # convert to bytes - auth_bytes = auth_s.encode() - encoded_bytes = base64.b64encode(auth_bytes) - # convert back to a string - encoded = encoded_bytes.decode() - # strip the trailing carriage return - return encoded.replace('\n', '') - - -class Credential: - """ - A username/password pair. Use like a namedtuple. - """ - - def __init__(self, username, password): - self.username = username - self.password = password - - def __iter__(self): - yield self.username - yield self.password - - def __str__(self): - return '%(username)s:%(password)s' % vars(self) - - -class PyPIConfig(configparser.RawConfigParser): - def __init__(self): - """ - Load from ~/.pypirc - """ - defaults = dict.fromkeys(['username', 'password', 'repository'], '') - super().__init__(defaults) - - rc = os.path.join(os.path.expanduser('~'), '.pypirc') - if os.path.exists(rc): - self.read(rc) - - @property - def creds_by_repository(self): - sections_with_repositories = [ - section for section in self.sections() - if self.get(section, 'repository').strip() - ] - - return dict(map(self._get_repo_cred, sections_with_repositories)) - - def _get_repo_cred(self, section): - repo = self.get(section, 'repository').strip() - return repo, Credential( - self.get(section, 'username').strip(), - self.get(section, 'password').strip(), - ) - - def find_credential(self, url): - """ - If the URL indicated appears to be a repository defined in this - config, return the credential for that repository. 
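A toy version of the prefix lookup that find_credential performs against creds_by_repository (the repository URL and credentials below are made up):

creds_by_repository = {
    'https://upload.example.invalid/legacy/': ('alice', 's3cret'),
}

def find_credential(url):
    # A credential applies to any URL under its repository prefix.
    for repository, cred in creds_by_repository.items():
        if url.startswith(repository):
            return cred
    return None

assert find_credential('https://upload.example.invalid/legacy/demo/') == ('alice', 's3cret')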
- """ - for repository, cred in self.creds_by_repository.items(): - if url.startswith(repository): - return cred - - -def open_with_auth(url, opener=urllib.request.urlopen): - """Open a urllib2 request, handling HTTP authentication""" - - parsed = urllib.parse.urlparse(url) - scheme, netloc, path, params, query, frag = parsed - - # Double scheme does not raise on macOS as revealed by a - # failing test. We would expect "nonnumeric port". Refs #20. - if netloc.endswith(':'): - raise http.client.InvalidURL("nonnumeric port: ''") - - if scheme in ('http', 'https'): - auth, address = _splituser(netloc) - else: - auth = None - - if not auth: - cred = PyPIConfig().find_credential(url) - if cred: - auth = str(cred) - info = cred.username, url - log.info('Authenticating as %s for %s (from .pypirc)', *info) - - if auth: - auth = "Basic " + _encode_auth(auth) - parts = scheme, address, path, params, query, frag - new_url = urllib.parse.urlunparse(parts) - request = urllib.request.Request(new_url) - request.add_header("Authorization", auth) - else: - request = urllib.request.Request(url) - - request.add_header('User-Agent', user_agent) - fp = opener(request) - - if auth: - # Put authentication info back into request URL if same host, - # so that links found on the page will work - s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url) - if s2 == scheme and h2 == address: - parts = s2, netloc, path2, param2, query2, frag2 - fp.url = urllib.parse.urlunparse(parts) - - return fp - - -# copy of urllib.parse._splituser from Python 3.8 -def _splituser(host): - """splituser('user[:passwd]@host[:port]') - --> 'user[:passwd]', 'host[:port]'.""" - user, delim, host = host.rpartition('@') - return (user if delim else None), host - - -# adding a timeout to avoid freezing package_index -open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth) - - -def fix_sf_url(url): - return url # backward compatibility - - -def local_open(url): - """Read a local path, with special support for directories""" - scheme, server, path, param, query, frag = urllib.parse.urlparse(url) - filename = urllib.request.url2pathname(path) - if os.path.isfile(filename): - return urllib.request.urlopen(url) - elif path.endswith('/') and os.path.isdir(filename): - files = [] - for f in os.listdir(filename): - filepath = os.path.join(filename, f) - if f == 'index.html': - with open(filepath, 'r') as fp: - body = fp.read() - break - elif os.path.isdir(filepath): - f += '/' - files.append('<a href="{name}">{name}</a>'.format(name=f)) - else: - tmpl = ( - "<html><head><title>{url}" - "{files}") - body = tmpl.format(url=url, files='\n'.join(files)) - status, message = 200, "OK" - else: - status, message, body = 404, "Path not found", "Not found" - - headers = {'content-type': 'text/html'} - body_stream = io.StringIO(body) - return urllib.error.HTTPError(url, status, message, headers, body_stream) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/py34compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/py34compat.py deleted file mode 100755 index 3ad9172..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/py34compat.py +++ /dev/null @@ -1,13 +0,0 @@ -import importlib - -try: - import importlib.util -except ImportError: - pass - - -try: - module_from_spec = importlib.util.module_from_spec -except AttributeError: - def module_from_spec(spec): - return spec.loader.load_module(spec.name) diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/sandbox.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/sandbox.py deleted file mode 100755 index 034fc80..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/sandbox.py +++ /dev/null @@ -1,530 +0,0 @@ -import os -import sys -import tempfile -import operator -import functools -import itertools -import re -import contextlib -import pickle -import textwrap -import builtins - -import pkg_resources -from distutils.errors import DistutilsError -from pkg_resources import working_set - -if sys.platform.startswith('java'): - import org.python.modules.posix.PosixModule as _os -else: - _os = sys.modules[os.name] -try: - _file = file -except NameError: - _file = None -_open = open - - -__all__ = [ - "AbstractSandbox", - "DirectorySandbox", - "SandboxViolation", - "run_setup", -] - - -def _execfile(filename, globals, locals=None): - """ - Python 3 implementation of execfile. - """ - mode = 'rb' - with open(filename, mode) as stream: - script = stream.read() - if locals is None: - locals = globals - code = compile(script, filename, 'exec') - exec(code, globals, locals) - - -@contextlib.contextmanager -def save_argv(repl=None): - saved = sys.argv[:] - if repl is not None: - sys.argv[:] = repl - try: - yield saved - finally: - sys.argv[:] = saved - - -@contextlib.contextmanager -def save_path(): - saved = sys.path[:] - try: - yield saved - finally: - sys.path[:] = saved - - -@contextlib.contextmanager -def override_temp(replacement): - """ - Monkey-patch tempfile.tempdir with replacement, ensuring it exists - """ - os.makedirs(replacement, exist_ok=True) - - saved = tempfile.tempdir - - tempfile.tempdir = replacement - - try: - yield - finally: - tempfile.tempdir = saved - - -@contextlib.contextmanager -def pushd(target): - saved = os.getcwd() - os.chdir(target) - try: - yield saved - finally: - os.chdir(saved) - - -class UnpickleableException(Exception): - """ - An exception representing another Exception that could not be pickled. - """ - - @staticmethod - def dump(type, exc): - """ - Always return a dumped (pickled) type and exc. If exc can't be pickled, - wrap it in UnpickleableException first. - """ - try: - return pickle.dumps(type), pickle.dumps(exc) - except Exception: - # get UnpickleableException inside the sandbox - from setuptools.sandbox import UnpickleableException as cls - - return cls.dump(cls, cls(repr(exc))) - - -class ExceptionSaver: - """ - A Context Manager that will save an exception, serialized, and restore it - later. - """ - - def __enter__(self): - return self - - def __exit__(self, type, exc, tb): - if not exc: - return - - # dump the exception - self._saved = UnpickleableException.dump(type, exc) - self._tb = tb - - # suppress the exception - return True - - def resume(self): - "restore and re-raise any exception" - - if '_saved' not in vars(self): - return - - type, exc = map(pickle.loads, self._saved) - raise exc.with_traceback(self._tb) - - -@contextlib.contextmanager -def save_modules(): - """ - Context in which imported modules are saved. - - Translates exceptions internal to the context into the equivalent exception - outside the context. - """ - saved = sys.modules.copy() - with ExceptionSaver() as saved_exc: - yield saved - - sys.modules.update(saved) - # remove any modules imported since - del_modules = ( - mod_name - for mod_name in sys.modules - if mod_name not in saved - # exclude any encodings modules. 
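save_modules() above is the heart of the sandbox's import isolation. A trimmed sketch of the same snapshot-and-restore idea, omitting the exception-pickling machinery the real implementation layers on top:

import contextlib
import sys

@contextlib.contextmanager
def save_modules_simple():
    # Snapshot sys.modules, then restore it and evict anything imported
    # inside the with-block (the real code also spares 'encodings.*').
    saved = sys.modules.copy()
    try:
        yield saved
    finally:
        sys.modules.update(saved)
        for name in [n for n in sys.modules if n not in saved]:
            del sys.modules[name]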
See #285 - and not mod_name.startswith('encodings.') - ) - _clear_modules(del_modules) - - saved_exc.resume() - - -def _clear_modules(module_names): - for mod_name in list(module_names): - del sys.modules[mod_name] - - -@contextlib.contextmanager -def save_pkg_resources_state(): - saved = pkg_resources.__getstate__() - try: - yield saved - finally: - pkg_resources.__setstate__(saved) - - -@contextlib.contextmanager -def setup_context(setup_dir): - temp_dir = os.path.join(setup_dir, 'temp') - with save_pkg_resources_state(): - with save_modules(): - with save_path(): - hide_setuptools() - with save_argv(): - with override_temp(temp_dir): - with pushd(setup_dir): - # ensure setuptools commands are available - __import__('setuptools') - yield - - -_MODULES_TO_HIDE = { - 'setuptools', - 'distutils', - 'pkg_resources', - 'Cython', - '_distutils_hack', -} - - -def _needs_hiding(mod_name): - """ - >>> _needs_hiding('setuptools') - True - >>> _needs_hiding('pkg_resources') - True - >>> _needs_hiding('setuptools_plugin') - False - >>> _needs_hiding('setuptools.__init__') - True - >>> _needs_hiding('distutils') - True - >>> _needs_hiding('os') - False - >>> _needs_hiding('Cython') - True - """ - base_module = mod_name.split('.', 1)[0] - return base_module in _MODULES_TO_HIDE - - -def hide_setuptools(): - """ - Remove references to setuptools' modules from sys.modules to allow the - invocation to import the most appropriate setuptools. This technique is - necessary to avoid issues such as #315 where setuptools upgrading itself - would fail to find a function declared in the metadata. - """ - _distutils_hack = sys.modules.get('_distutils_hack', None) - if _distutils_hack is not None: - _distutils_hack.remove_shim() - - modules = filter(_needs_hiding, sys.modules) - _clear_modules(modules) - - -def run_setup(setup_script, args): - """Run a distutils setup script, sandboxed in its directory""" - setup_dir = os.path.abspath(os.path.dirname(setup_script)) - with setup_context(setup_dir): - try: - sys.argv[:] = [setup_script] + list(args) - sys.path.insert(0, setup_dir) - # reset to include setup dir, w/clean callback list - working_set.__init__() - working_set.callbacks.append(lambda dist: dist.activate()) - - with DirectorySandbox(setup_dir): - ns = dict(__file__=setup_script, __name__='__main__') - _execfile(setup_script, ns) - except SystemExit as v: - if v.args and v.args[0]: - raise - # Normal exit, just return - - -class AbstractSandbox: - """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts""" - - _active = False - - def __init__(self): - self._attrs = [ - name - for name in dir(_os) - if not name.startswith('_') and hasattr(self, name) - ] - - def _copy(self, source): - for name in self._attrs: - setattr(os, name, getattr(source, name)) - - def __enter__(self): - self._copy(self) - if _file: - builtins.file = self._file - builtins.open = self._open - self._active = True - - def __exit__(self, exc_type, exc_value, traceback): - self._active = False - if _file: - builtins.file = _file - builtins.open = _open - self._copy(_os) - - def run(self, func): - """Run 'func' under os sandboxing""" - with self: - return func() - - def _mk_dual_path_wrapper(name): - original = getattr(_os, name) - - def wrap(self, src, dst, *args, **kw): - if self._active: - src, dst = self._remap_pair(name, src, dst, *args, **kw) - return original(src, dst, *args, **kw) - - return wrap - - for name in ["rename", "link", "symlink"]: - if hasattr(_os, name): - locals()[name] = _mk_dual_path_wrapper(name) - - 
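The loops just above build sandbox methods at class-creation time by assigning into locals(). A self-contained illustration of that factory trick (PathLogger is a hypothetical class, not part of setuptools):

import os

class PathLogger:
    _active = True

    def _mk_single_path_wrapper(name, original=None):  # no 'self': runs at class creation
        original = original or getattr(os, name)

        def wrap(self, path, *args, **kw):
            if self._active:
                print('intercepted %s(%r)' % (name, path))
            return original(path, *args, **kw)

        return wrap

    # Assigning into locals() while the class body executes installs the
    # generated wrappers as ordinary methods.
    for name in ['listdir', 'stat']:
        if hasattr(os, name):
            locals()[name] = _mk_single_path_wrapper(name)

PathLogger().listdir('.')  # prints "intercepted listdir('.')", then delegates to os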
def _mk_single_path_wrapper(name, original=None): - original = original or getattr(_os, name) - - def wrap(self, path, *args, **kw): - if self._active: - path = self._remap_input(name, path, *args, **kw) - return original(path, *args, **kw) - - return wrap - - if _file: - _file = _mk_single_path_wrapper('file', _file) - _open = _mk_single_path_wrapper('open', _open) - for name in [ - "stat", - "listdir", - "chdir", - "open", - "chmod", - "chown", - "mkdir", - "remove", - "unlink", - "rmdir", - "utime", - "lchown", - "chroot", - "lstat", - "startfile", - "mkfifo", - "mknod", - "pathconf", - "access", - ]: - if hasattr(_os, name): - locals()[name] = _mk_single_path_wrapper(name) - - def _mk_single_with_return(name): - original = getattr(_os, name) - - def wrap(self, path, *args, **kw): - if self._active: - path = self._remap_input(name, path, *args, **kw) - return self._remap_output(name, original(path, *args, **kw)) - return original(path, *args, **kw) - - return wrap - - for name in ['readlink', 'tempnam']: - if hasattr(_os, name): - locals()[name] = _mk_single_with_return(name) - - def _mk_query(name): - original = getattr(_os, name) - - def wrap(self, *args, **kw): - retval = original(*args, **kw) - if self._active: - return self._remap_output(name, retval) - return retval - - return wrap - - for name in ['getcwd', 'tmpnam']: - if hasattr(_os, name): - locals()[name] = _mk_query(name) - - def _validate_path(self, path): - """Called to remap or validate any path, whether input or output""" - return path - - def _remap_input(self, operation, path, *args, **kw): - """Called for path inputs""" - return self._validate_path(path) - - def _remap_output(self, operation, path): - """Called for path outputs""" - return self._validate_path(path) - - def _remap_pair(self, operation, src, dst, *args, **kw): - """Called for path pairs like rename, link, and symlink operations""" - return ( - self._remap_input(operation + '-from', src, *args, **kw), - self._remap_input(operation + '-to', dst, *args, **kw), - ) - - -if hasattr(os, 'devnull'): - _EXCEPTIONS = [os.devnull] -else: - _EXCEPTIONS = [] - - -class DirectorySandbox(AbstractSandbox): - """Restrict operations to a single subdirectory - pseudo-chroot""" - - write_ops = dict.fromkeys( - [ - "open", - "chmod", - "chown", - "mkdir", - "remove", - "unlink", - "rmdir", - "utime", - "lchown", - "chroot", - "mkfifo", - "mknod", - "tempnam", - ] - ) - - _exception_patterns = [] - "exempt writing to paths that match the pattern" - - def __init__(self, sandbox, exceptions=_EXCEPTIONS): - self._sandbox = os.path.normcase(os.path.realpath(sandbox)) - self._prefix = os.path.join(self._sandbox, '') - self._exceptions = [ - os.path.normcase(os.path.realpath(path)) for path in exceptions - ] - AbstractSandbox.__init__(self) - - def _violation(self, operation, *args, **kw): - from setuptools.sandbox import SandboxViolation - - raise SandboxViolation(operation, args, kw) - - if _file: - - def _file(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("file", path, mode, *args, **kw) - return _file(path, mode, *args, **kw) - - def _open(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("open", path, mode, *args, **kw) - return _open(path, mode, *args, **kw) - - def tmpnam(self): - self._violation("tmpnam") - - def _ok(self, path): - active = self._active - try: - self._active = False - realpath = 
os.path.normcase(os.path.realpath(path)) - return ( - self._exempted(realpath) - or realpath == self._sandbox - or realpath.startswith(self._prefix) - ) - finally: - self._active = active - - def _exempted(self, filepath): - start_matches = ( - filepath.startswith(exception) for exception in self._exceptions - ) - pattern_matches = ( - re.match(pattern, filepath) for pattern in self._exception_patterns - ) - candidates = itertools.chain(start_matches, pattern_matches) - return any(candidates) - - def _remap_input(self, operation, path, *args, **kw): - """Called for path inputs""" - if operation in self.write_ops and not self._ok(path): - self._violation(operation, os.path.realpath(path), *args, **kw) - return path - - def _remap_pair(self, operation, src, dst, *args, **kw): - """Called for path pairs like rename, link, and symlink operations""" - if not self._ok(src) or not self._ok(dst): - self._violation(operation, src, dst, *args, **kw) - return (src, dst) - - def open(self, file, flags, mode=0o777, *args, **kw): - """Called for low-level os.open()""" - if flags & WRITE_FLAGS and not self._ok(file): - self._violation("os.open", file, flags, mode, *args, **kw) - return _os.open(file, flags, mode, *args, **kw) - - -WRITE_FLAGS = functools.reduce( - operator.or_, - [ - getattr(_os, a, 0) - for a in "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split() - ], -) - - -class SandboxViolation(DistutilsError): - """A setup script attempted to modify the filesystem outside the sandbox""" - - tmpl = textwrap.dedent( - """ - SandboxViolation: {cmd}{args!r} {kwargs} - - The package setup script has attempted to modify files on your system - that are not within the EasyInstall build area, and has been aborted. - - This package cannot be safely installed by EasyInstall, and may not - support alternate installation locations even if you run its setup - script by hand. Please inform the package's author and the EasyInstall - maintainers to find out if a fix or workaround is available. 
- """ - ).lstrip() - - def __str__(self): - cmd, args, kwargs = self.args - return self.tmpl.format(**locals()) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script (dev).tmpl b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script (dev).tmpl deleted file mode 100644 index 39a24b0..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script (dev).tmpl +++ /dev/null @@ -1,6 +0,0 @@ -# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r -__requires__ = %(spec)r -__import__('pkg_resources').require(%(spec)r) -__file__ = %(dev_path)r -with open(__file__) as f: - exec(compile(f.read(), __file__, 'exec')) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script.tmpl b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script.tmpl deleted file mode 100644 index ff5efbc..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/script.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r -__requires__ = %(spec)r -__import__('pkg_resources').run_script(%(spec)r, %(script_name)r) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/unicode_utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/unicode_utils.py deleted file mode 100755 index e84e65e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/unicode_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -import unicodedata -import sys - - -# HFS Plus uses decomposed UTF-8 -def decompose(path): - if isinstance(path, str): - return unicodedata.normalize('NFD', path) - try: - path = path.decode('utf-8') - path = unicodedata.normalize('NFD', path) - path = path.encode('utf-8') - except UnicodeError: - pass # Not UTF-8 - return path - - -def filesys_decode(path): - """ - Ensure that the given path is decoded, - NONE when no expected encoding works - """ - - if isinstance(path, str): - return path - - fs_enc = sys.getfilesystemencoding() or 'utf-8' - candidates = fs_enc, 'utf-8' - - for enc in candidates: - try: - return path.decode(enc) - except UnicodeDecodeError: - continue - - -def try_encode(string, enc): - "turn unicode encoding into a functional routine" - try: - return string.encode(enc) - except UnicodeEncodeError: - return None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/version.py deleted file mode 100755 index 95e1869..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/version.py +++ /dev/null @@ -1,6 +0,0 @@ -import pkg_resources - -try: - __version__ = pkg_resources.get_distribution('setuptools').version -except Exception: - __version__ = 'unknown' diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/wheel.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/wheel.py deleted file mode 100755 index 9819e8b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/wheel.py +++ /dev/null @@ -1,213 +0,0 @@ -"""Wheels support.""" - -from distutils.util import get_platform -from distutils import log -import email -import itertools -import os -import posixpath -import re -import zipfile - -import pkg_resources -import setuptools -from pkg_resources import parse_version 
-from setuptools.extern.packaging.tags import sys_tags -from setuptools.extern.packaging.utils import canonicalize_name -from setuptools.command.egg_info import write_requirements - - -WHEEL_NAME = re.compile( - r"""^(?P.+?)-(?P\d.*?) - ((-(?P\d.*?))?-(?P.+?)-(?P.+?)-(?P.+?) - )\.whl$""", - re.VERBOSE).match - -NAMESPACE_PACKAGE_INIT = \ - "__import__('pkg_resources').declare_namespace(__name__)\n" - - -def unpack(src_dir, dst_dir): - '''Move everything under `src_dir` to `dst_dir`, and delete the former.''' - for dirpath, dirnames, filenames in os.walk(src_dir): - subdir = os.path.relpath(dirpath, src_dir) - for f in filenames: - src = os.path.join(dirpath, f) - dst = os.path.join(dst_dir, subdir, f) - os.renames(src, dst) - for n, d in reversed(list(enumerate(dirnames))): - src = os.path.join(dirpath, d) - dst = os.path.join(dst_dir, subdir, d) - if not os.path.exists(dst): - # Directory does not exist in destination, - # rename it and prune it from os.walk list. - os.renames(src, dst) - del dirnames[n] - # Cleanup. - for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True): - assert not filenames - os.rmdir(dirpath) - - -class Wheel: - - def __init__(self, filename): - match = WHEEL_NAME(os.path.basename(filename)) - if match is None: - raise ValueError('invalid wheel name: %r' % filename) - self.filename = filename - for k, v in match.groupdict().items(): - setattr(self, k, v) - - def tags(self): - '''List tags (py_version, abi, platform) supported by this wheel.''' - return itertools.product( - self.py_version.split('.'), - self.abi.split('.'), - self.platform.split('.'), - ) - - def is_compatible(self): - '''Is the wheel is compatible with the current platform?''' - supported_tags = set( - (t.interpreter, t.abi, t.platform) for t in sys_tags()) - return next((True for t in self.tags() if t in supported_tags), False) - - def egg_name(self): - return pkg_resources.Distribution( - project_name=self.project_name, version=self.version, - platform=(None if self.platform == 'any' else get_platform()), - ).egg_name() + '.egg' - - def get_dist_info(self, zf): - # find the correct name of the .dist-info dir in the wheel file - for member in zf.namelist(): - dirname = posixpath.dirname(member) - if (dirname.endswith('.dist-info') and - canonicalize_name(dirname).startswith( - canonicalize_name(self.project_name))): - return dirname - raise ValueError("unsupported wheel format. .dist-info not found") - - def install_as_egg(self, destination_eggdir): - '''Install wheel as an egg directory.''' - with zipfile.ZipFile(self.filename) as zf: - self._install_as_egg(destination_eggdir, zf) - - def _install_as_egg(self, destination_eggdir, zf): - dist_basename = '%s-%s' % (self.project_name, self.version) - dist_info = self.get_dist_info(zf) - dist_data = '%s.data' % dist_basename - egg_info = os.path.join(destination_eggdir, 'EGG-INFO') - - self._convert_metadata(zf, destination_eggdir, dist_info, egg_info) - self._move_data_entries(destination_eggdir, dist_data) - self._fix_namespace_packages(egg_info, destination_eggdir) - - @staticmethod - def _convert_metadata(zf, destination_eggdir, dist_info, egg_info): - def get_metadata(name): - with zf.open(posixpath.join(dist_info, name)) as fp: - value = fp.read().decode('utf-8') - return email.parser.Parser().parsestr(value) - - wheel_metadata = get_metadata('WHEEL') - # Check wheel format version is supported. 
- wheel_version = parse_version(wheel_metadata.get('Wheel-Version')) - wheel_v1 = ( - parse_version('1.0') <= wheel_version < parse_version('2.0dev0') - ) - if not wheel_v1: - raise ValueError( - 'unsupported wheel format version: %s' % wheel_version) - # Extract to target directory. - os.mkdir(destination_eggdir) - zf.extractall(destination_eggdir) - # Convert metadata. - dist_info = os.path.join(destination_eggdir, dist_info) - dist = pkg_resources.Distribution.from_location( - destination_eggdir, dist_info, - metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info), - ) - - # Note: Evaluate and strip markers now, - # as it's difficult to convert back from the syntax: - # foobar; "linux" in sys_platform and extra == 'test' - def raw_req(req): - req.marker = None - return str(req) - install_requires = list(map(raw_req, dist.requires())) - extras_require = { - extra: [ - req - for req in map(raw_req, dist.requires((extra,))) - if req not in install_requires - ] - for extra in dist.extras - } - os.rename(dist_info, egg_info) - os.rename( - os.path.join(egg_info, 'METADATA'), - os.path.join(egg_info, 'PKG-INFO'), - ) - setup_dist = setuptools.Distribution( - attrs=dict( - install_requires=install_requires, - extras_require=extras_require, - ), - ) - # Temporarily disable info traces. - log_threshold = log._global_log.threshold - log.set_threshold(log.WARN) - try: - write_requirements( - setup_dist.get_command_obj('egg_info'), - None, - os.path.join(egg_info, 'requires.txt'), - ) - finally: - log.set_threshold(log_threshold) - - @staticmethod - def _move_data_entries(destination_eggdir, dist_data): - """Move data entries to their correct location.""" - dist_data = os.path.join(destination_eggdir, dist_data) - dist_data_scripts = os.path.join(dist_data, 'scripts') - if os.path.exists(dist_data_scripts): - egg_info_scripts = os.path.join( - destination_eggdir, 'EGG-INFO', 'scripts') - os.mkdir(egg_info_scripts) - for entry in os.listdir(dist_data_scripts): - # Remove bytecode, as it's not properly handled - # during easy_install scripts install phase. 
- if entry.endswith('.pyc'): - os.unlink(os.path.join(dist_data_scripts, entry)) - else: - os.rename( - os.path.join(dist_data_scripts, entry), - os.path.join(egg_info_scripts, entry), - ) - os.rmdir(dist_data_scripts) - for subdir in filter(os.path.exists, ( - os.path.join(dist_data, d) - for d in ('data', 'headers', 'purelib', 'platlib') - )): - unpack(subdir, destination_eggdir) - if os.path.exists(dist_data): - os.rmdir(dist_data) - - @staticmethod - def _fix_namespace_packages(egg_info, destination_eggdir): - namespace_packages = os.path.join( - egg_info, 'namespace_packages.txt') - if os.path.exists(namespace_packages): - with open(namespace_packages) as fp: - namespace_packages = fp.read().split() - for mod in namespace_packages: - mod_dir = os.path.join(destination_eggdir, *mod.split('.')) - mod_init = os.path.join(mod_dir, '__init__.py') - if not os.path.exists(mod_dir): - os.mkdir(mod_dir) - if not os.path.exists(mod_init): - with open(mod_init, 'w') as fp: - fp.write(NAMESPACE_PACKAGE_INIT) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/windows_support.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/windows_support.py deleted file mode 100755 index cb977cf..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/windows_support.py +++ /dev/null @@ -1,29 +0,0 @@ -import platform -import ctypes - - -def windows_only(func): - if platform.system() != 'Windows': - return lambda *args, **kwargs: None - return func - - -@windows_only -def hide_file(path): - """ - Set the hidden attribute on a file or directory. - - From http://stackoverflow.com/questions/19622133/ - - `path` must be text. - """ - __import__('ctypes.wintypes') - SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW - SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD - SetFileAttributes.restype = ctypes.wintypes.BOOL - - FILE_ATTRIBUTE_HIDDEN = 0x02 - - ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN) - if not ret: - raise ctypes.WinError() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/six.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/six.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/socks.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/socks.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sockshandler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sockshandler.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pyrsistent-0.18.1.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/LICENSE new file mode 100644 index 0000000..d13065d --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, 
January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 Splunk Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/METADATA similarity index 88% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/METADATA index 072d32c..e502ebe 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: solnlib -Version: 4.4.1 +Version: 4.7.0 Summary: The Splunk Software Development Kit for Splunk Solutions Home-page: https://github.com/splunk/addonfactory-solutions-library-python License: Apache-2.0 @@ -9,12 +9,12 @@ Author-email: addonfactory@splunk.com Requires-Python: >=3.7,<4.0 Classifier: License :: OSI Approved :: Apache Software License Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 Requires-Dist: defusedxml (>=0.7.1,<0.8.0) -Requires-Dist: requests (>=2.24,<3.0) +Requires-Dist: requests (>=2.28.0,<3.0.0) Requires-Dist: sortedcontainers (>=2.2,<3.0) -Requires-Dist: splunk-sdk (>=1.6.16,<2.0.0) +Requires-Dist: splunk-sdk (>=1.6.18) Project-URL: Repository, https://github.com/splunk/addonfactory-solutions-library-python diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/RECORD similarity index 58% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/RECORD rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/RECORD index 21d2198..ad2349a 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/RECORD @@ -1,29 +1,29 @@ -solnlib-4.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -solnlib-4.4.1.dist-info/LICENSE,sha256=Xvvd894DEl8lUHPEeFU-Ya18RW7Hc2IlPTZS_bE3Hgs,11341 -solnlib-4.4.1.dist-info/METADATA,sha256=Q4KZvFg0an3EwL1t6TEDM-vnTO2bt3SgKsBsR8r344Q,864 -solnlib-4.4.1.dist-info/RECORD,, -solnlib-4.4.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -solnlib-4.4.1.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83 -solnlib/__init__.py,sha256=R_ntDnE2qXBUVOu52kT1YZckTJDUSHPWvm5t8r5HTc4,1244 +solnlib-4.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +solnlib-4.7.0.dist-info/LICENSE,sha256=Xvvd894DEl8lUHPEeFU-Ya18RW7Hc2IlPTZS_bE3Hgs,11341 
+solnlib-4.7.0.dist-info/METADATA,sha256=_JuRZwNilG8g6zQca7rXrPAgWkbT-OjrNAeeMV9slYU,861 +solnlib-4.7.0.dist-info/RECORD,, +solnlib-4.7.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +solnlib-4.7.0.dist-info/WHEEL,sha256=bbU3AyvhQ312rVm7zzRQjs6axI1UYWC3nmFA2E6FFSI,88 +solnlib/__init__.py,sha256=-wyuqIkJDeMKLH5QjDC7pMZwIsWSTrBx0uYqs0AI_qI,1244 solnlib/_utils.py,sha256=g8c5sLcKYtsdJDf8xfufQxeEf153hFXA83mqw17eNrg,2735 solnlib/acl.py,sha256=WLe7X7lBiaGRtUSqOmUKq38Z0kMnZL9_fPx5XnXd_I4,5901 -solnlib/conf_manager.py,sha256=KjFZEBCCxgU2jkZnESZye-uu-PqKNcOX90M9X-ZIc-s,15048 -solnlib/credentials.py,sha256=Sx7iLtGjzkvcOFBedS9lUxGeyah5cJXOIswTQQ727R8,11575 -solnlib/file_monitor.py,sha256=JBhq1xj6IUMV1JKGtbTu4fQfqFQzmEYDrbqOxNDiywY,3923 +solnlib/conf_manager.py,sha256=zpzNSE1ocXrBXPhd-_Zipr2mLf7My2ix6e9ItMsd08o,16897 +solnlib/credentials.py,sha256=8mr0t_bxz8wKt_ZIiFkqKxckDaxRhkZKP2Q48BHahD8,13862 +solnlib/file_monitor.py,sha256=LRDNOtFxz83AIcWrhmorPOv1JoWvjpd6AtuM5_ynem0,3974 solnlib/hec_config.py,sha256=1QuFfOmpIAofVVC2NvCo4Hvre8YER7RhAriDjMv0sDI,5050 solnlib/log.py,sha256=10Ro9RfhTDrDd36t_tefH9NXUNugRx5qO_l-EDbOP50,7269 solnlib/modular_input/__init__.py,sha256=ln0Lqf55KfIZUDYy7vzQOvC6Y5_J1vSS7N1KzbO04QM,1186 solnlib/modular_input/checkpointer.py,sha256=-X22d3kqYrqiyefw3kQtc_IeQTDqaTw0iKc9ZRicLiY,8688 solnlib/modular_input/event.py,sha256=ERrILk7nQnERmgDP522aC017lvIsGAJqsurbupTlPjY,7849 solnlib/modular_input/event_writer.py,sha256=IGPSgBgv9aHLXMfE_Bd6-fDDaoPfIEHlXJLFNFzwZBc,15618 -solnlib/modular_input/modular_input.py,sha256=OVBLMLgoE4L-3E8VC-rnWBG5mm3TbaWNryr0tV4rVvk,17945 +solnlib/modular_input/modular_input.py,sha256=uOzxAuBSGZM2dzsqZvD6pzPGe2WPMtIssrSAyTm2EfU,17930 solnlib/net_utils.py,sha256=x0gyS6JR2LjiwaDIzZvp-mlzBtfdAbVdYUT52WQqP4k,3840 solnlib/orphan_process_monitor.py,sha256=evo8yPH6vdoKjllN-cB6kZhkwMkCvhr-drl85zUXALY,3214 solnlib/pattern.py,sha256=oBahtj7xVlWOaaLPeqdrpud6vVUl05fhx6r2YUk9IdI,1158 solnlib/server_info.py,sha256=xgQh_ZNtHm9knCXJQMPZVemuGzRK-hXOjrtV8WVbfVM,7856 solnlib/splunk_rest_client.py,sha256=qA77SfsFvVmeq1YlTeBlmjpkWBb6NQrHS1YLvX8p5x4,7182 -solnlib/splunkenv.py,sha256=te172bEKUTZPsKLa9CHiPZ918Bz0KdFMI7sZwxDP8zA,7817 +solnlib/splunkenv.py,sha256=HQslPfWJxzB3S4LFC90apqJl2iaAO9sWoK6Xqfdcmn4,7796 solnlib/time_parser.py,sha256=XQkw5eWo4nz5KmaIv5zw8dQdtsqTiT3RljDYw2aYLa4,4664 solnlib/timer_queue.py,sha256=wzqZXJ2_2uKX4RPpkrie7QZyKU4XuoEv_UBVfl9puHw,9968 solnlib/user_access.py,sha256=sUmYArPRhPAkMls4AUbBHD0Kniz3VEFaNY6NOoRNpUc,29171 -solnlib/utils.py,sha256=QHFTuDI-9nB83YDCPvf4XNuoGbpO68UcVJW5gOwXnb0,4502 +solnlib/utils.py,sha256=E4EFu0LYHME6YYMDbfiHRnWSlNptqGNsvG2P7YEncTc,5251 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/WHEEL new file mode 100644 index 0000000..2892f30 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.7.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: poetry-core 1.1.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/__init__.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/__init__.py
old mode 100755
new mode 100644
index fe45dd1..852468b
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/__init__.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/__init__.py
@@ -54,4 +54,4 @@
     "utils",
 ]
 
-__version__ = "4.4.1"
+__version__ = "4.7.0"
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/_utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/_utils.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/acl.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/acl.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/conf_manager.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/conf_manager.py
old mode 100755
new mode 100644
index a1dd219..3e38337
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/conf_manager.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/conf_manager.py
@@ -87,7 +87,8 @@ def __init__(
         self._port = port
         self._context = context
         self._cred_manager = None
-        ### 'realm' is set to provided 'realm' argument otherwise as default behaviour it is set to 'APP_NAME'.
+        # 'realm' is set to the provided 'realm' argument; otherwise, by
+        # default, it is set to 'APP_NAME'.
         if realm is None:
             self._realm = self._app
         else:
@@ -337,7 +338,7 @@ def delete(self, stanza_name: str):
 
         try:
             self._conf.delete(stanza_name)
-        except KeyError as e:
+        except KeyError:
             logging.error(
                 "Delete stanza: %s error: %s.", stanza_name, traceback.format_exc()
             )
@@ -381,8 +382,11 @@ class ConfManager:
         `credential:__REST_CREDENTIAL__#Splunk_TA_test#configs/conf-CONF_FILENAME:STANZA_NAME``splunk_cred_sep``1:`
 
         >>> from solnlib import conf_manager
-        >>> cfm = conf_manager.ConfManager(session_key,
-            'Splunk_TA_test', realm='__REST_CREDENTIAL__#Splunk_TA_test#configs/conf-CONF_FILENAME')
+        >>> cfm = conf_manager.ConfManager(
+                session_key,
+                'Splunk_TA_test',
+                realm='__REST_CREDENTIAL__#Splunk_TA_test#configs/conf-CONF_FILENAME'
+            )
     """
 
     def __init__(
@@ -452,7 +456,7 @@ def get_conf(self, name: str, refresh: bool = False) -> ConfFile:
         try:
             conf = self._confs[name]
         except KeyError:
-            raise ConfManagerException("Config file: %s does not exist." % name)
+            raise ConfManagerException(f"Config file: {name} does not exist.")
 
         return ConfFile(
             name,
@@ -494,3 +498,60 @@ def create_conf(self, name: str) -> ConfFile:
             self._realm,
             **self._context,
         )
+
+
+def get_log_level(
+    *,
+    logger: logging.Logger,
+    session_key: str,
+    app_name: str,
+    conf_name: str,
+    log_level_field: str = "loglevel",
+    default_log_level: str = "INFO",
+) -> str:
+    """This function returns the log level for the add-on from the
+    configuration file.
+
+    Arguments:
+        logger: Logger.
+        session_key: Splunk access token.
+        app_name: Add-on name.
+        conf_name: Configuration file name where the logging stanza is.
+        log_level_field: Logging level field name under the logging stanza.
+        default_log_level: Default log level to return in case of errors.
+
+    Returns:
+        Log level defined under the `logging.log_level_field` field in the
+        `conf_name` file. In case of any error, `default_log_level` will be
+        returned.
+
+    Examples:
+        >>> from solnlib import conf_manager
+        >>> log_level = conf_manager.get_log_level(
+        ...     logger=logger,
+        ...     session_key="session_key",
+        ...     app_name="ADDON_NAME",
+        ...     conf_name="splunk_ta_addon_settings",
+        ... )
+    """
+    try:
+        cfm = ConfManager(
+            session_key,
+            app_name,
+            realm=f"__REST_CREDENTIAL__#{app_name}#configs/conf-{conf_name}",
+        )
+        conf = cfm.get_conf(conf_name)
+    except ConfManagerException:
+        logger.error(
+            f"Failed to fetch configuration file {conf_name}, "
+            f"taking {default_log_level} as log level."
+        )
+        return default_log_level
+    try:
+        logging_details = conf.get("logging")
+        return logging_details.get(log_level_field, default_log_level)
+    except ConfStanzaNotExistException:
+        logger.error(
+            f'"logging" stanza does not exist under {conf_name}, '
+            f"taking {default_log_level} as log level."
+        )
+        return default_log_level
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/credentials.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/credentials.py
old mode 100755
new mode 100644
index 85c5c0f..53f0237
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/credentials.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/credentials.py
@@ -18,8 +18,10 @@
 
 import json
 import re
+import warnings
+from typing import Dict, List
 
-from splunklib import binding
+from splunklib import binding, client
 
 from . import splunk_rest_client as rest_client
 from .net_utils import validate_scheme_host_port
@@ -79,7 +81,7 @@ def __init__(
         port: int = None,
         **context: dict,
     ):
-        """Initializes CredentialsManager.
+        """Initializes CredentialManager.
 
         Arguments:
             session_key: Splunk access token.
@@ -123,9 +125,11 @@ def get_password(self, user: str) -> str:
                 realm='realm_test')
             >>> cm.get_password('testuser2')
         """
-
-        all_passwords = self._get_all_passwords()
-        for password in all_passwords:
+        if self._realm is not None:
+            passwords = self.get_clear_passwords_in_realm()
+        else:
+            passwords = self.get_clear_passwords()
+        for password in passwords:
             if password["username"] == user and password["realm"] == self._realm:
                 return password["clear_password"]
 
@@ -151,7 +155,9 @@ def set_password(self, user: str, password: str):
         length = 0
         index = 1
         while length < len(password):
-            curr_str = password[length : length + self.SPLUNK_CRED_LEN_LIMIT]
+            curr_str = password[
+                length : length + self.SPLUNK_CRED_LEN_LIMIT  # noqa: E203
+            ]
             partial_user = self.SEP.join([user, str(index)])
             self._update_password(partial_user, curr_str)
             length += self.SPLUNK_CRED_LEN_LIMIT
@@ -180,14 +186,16 @@ def _update_password(self, user: str, password: str):
             self._storage_passwords.create(password, user, self._realm)
         except binding.HTTPError as ex:
             if ex.status == 409:
-                all_passwords = self._get_all_passwords_in_realm()
-                for pwd_stanza in all_passwords:
+                if self._realm is not None:
+                    passwords = self.get_raw_passwords_in_realm()
+                else:
+                    passwords = self.get_raw_passwords()
+                for pwd_stanza in passwords:
                     if pwd_stanza.realm == self._realm and pwd_stanza.username == user:
                         pwd_stanza.update(password=password)
                         return
                 raise ValueError(
-                    "Can not get the password object for realm: %s user: %s"
-                    % (self._realm, user)
+                    f"Can not get the password object for realm: {self._realm} user: {user}"
                 )
             else:
                 raise ex
@@ -209,12 +217,15 @@ def delete_password(self, user: str):
                 realm='realm_test')
             >>> cm.delete_password('testuser1')
         """
-        all_passwords = self._get_all_passwords_in_realm()
+        if self._realm is not None:
+            passwords = self.get_raw_passwords_in_realm()
+        else:
+            passwords = self.get_raw_passwords()
         deleted = False
         ent_pattern = re.compile(
             r"({}{}\d+)".format(user.replace("\\", "\\\\"), self.SEP)
         )
-        for password in list(all_passwords):
+        for password in passwords:
             match = (user == password.username) or ent_pattern.match(password.username)
             if match and password.realm == self._realm:
                 password.delete()
@@ -222,12 +233,45 @@ def delete_password(self, user: str):
 
         if not deleted:
             raise CredentialNotExistException(
-                "Failed to delete password of realm={}, user={}".format(
-                    self._realm, user
-                )
+                f"Failed to delete password of realm={self._realm}, user={user}"
             )
 
-    def _get_all_passwords_in_realm(self):
+    def get_raw_passwords(self) -> List[client.StoragePassword]:
+        """Returns all passwords in the "raw" format."""
+        warnings.warn(
+            "Please pass realm to the CredentialManager, "
+            "so it can utilize the get_raw_passwords_in_realm method instead."
+        )
+        return self._storage_passwords.list(count=-1)
+
+    def get_raw_passwords_in_realm(self) -> List[client.StoragePassword]:
+        """Returns all passwords within the realm in the "raw" format."""
+        if self._realm is None:
+            raise ValueError("No realm was specified")
+        return self._storage_passwords.list(count=-1, search=f"realm={self._realm}")
+
+    def get_clear_passwords(self) -> List[Dict[str, str]]:
+        """Returns all passwords in the "clear" format."""
+        warnings.warn(
+            "Please pass realm to the CredentialManager, "
+            "so it can utilize the get_clear_passwords_in_realm method instead."
+        )
+        raw_passwords = self.get_raw_passwords()
+        return self._get_clear_passwords(raw_passwords)
+
+    def get_clear_passwords_in_realm(self) -> List[Dict[str, str]]:
+        """Returns all passwords within the realm in the "clear" format."""
+        if self._realm is None:
+            raise ValueError("No realm was specified")
+        raw_passwords = self.get_raw_passwords_in_realm()
+        return self._get_clear_passwords(raw_passwords)
+
+    def _get_all_passwords_in_realm(self) -> List[client.StoragePassword]:
+        warnings.warn(
+            "_get_all_passwords_in_realm is deprecated, "
+            "please use get_raw_passwords_in_realm instead.",
+            stacklevel=2,
+        )
         if self._realm:
             all_passwords = self._storage_passwords.list(
                 count=-1, search=f"realm={self._realm}"
@@ -236,13 +280,12 @@ def _get_all_passwords_in_realm(self):
             all_passwords = self._storage_passwords.list(count=-1, search="")
 
         return all_passwords
 
-    @retry(exceptions=[binding.HTTPError])
-    def _get_all_passwords(self):
-        all_passwords = self._storage_passwords.list(count=-1)
-
+    def _get_clear_passwords(
+        self, passwords: List[client.StoragePassword]
+    ) -> List[Dict[str, str]]:
         results = {}
-        ptn = re.compile(fr"(.+){self.SEP}(\d+)")
-        for password in all_passwords:
+        ptn = re.compile(rf"(.+){self.SEP}(\d+)")
+        for password in passwords:
             match = ptn.match(password.name)
             if match:
                 actual_name = match.group(1) + ":"
@@ -261,7 +304,7 @@ def _get_all_passwords(self):
 
         # Backward compatibility
         # To deal with the password with only one stanza which is generated by the old version.
-        for password in all_passwords:
+        for password in passwords:
             match = ptn.match(password.name)
             if (not match) and (password.name not in results):
                 results[password.name] = {
@@ -287,6 +330,16 @@ def _get_all_passwords(self):
 
         return list(results.values())
 
+    @retry(exceptions=[binding.HTTPError])
+    def _get_all_passwords(self) -> List[Dict[str, str]]:
+        warnings.warn(
+            "_get_all_passwords is deprecated, "
+            "please use get_clear_passwords instead.",
+            stacklevel=2,
+        )
+        passwords = self._storage_passwords.list(count=-1)
+        return self._get_clear_passwords(passwords)
+
 
 @retry(exceptions=[binding.HTTPError])
 def get_session_key(
@@ -315,7 +368,7 @@ def get_session_key(
         ValueError: if scheme, host or port are invalid.
 
     Examples:
-       >>> credentials.get_session_key('user', 'password')
+       >>> get_session_key('user', 'password')
     """
     validate_scheme_host_port(scheme, host, port)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/file_monitor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/file_monitor.py
old mode 100755
new mode 100644
index 0989595..65fd655
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/file_monitor.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/file_monitor.py
@@ -22,7 +22,7 @@
 import threading
 import time
 import traceback
-from typing import Callable, List
+from typing import Any, Callable, List
 
 __all__ = ["FileChangesChecker", "FileMonitor"]
 
@@ -30,7 +30,7 @@
 class FileChangesChecker:
     """Files change checker."""
 
-    def __init__(self, callback: Callable, files: List):
+    def __init__(self, callback: Callable[[List[str]], Any], files: List):
         """Initializes FileChangesChecker.
 
         Arguments:
@@ -45,7 +45,7 @@ def __init__(self, callback: Callable, files: List):
             try:
                 self.file_mtimes[k] = op.getmtime(k)
             except OSError:
-                logging.debug("Getmtime for %s, failed: %s", k, traceback.format_exc())
+                logging.debug(f"Getmtime for {k}, failed: {traceback.format_exc()}")
 
     def check_changes(self) -> bool:
         """Check files change.
@@ -56,8 +56,7 @@ def check_changes(self) -> bool:
         Returns:
             True if files changed else False
         """
-
-        logging.debug("Checking files=%s", self._files)
+        logging.debug(f"Checking files={self._files}")
         file_mtimes = self.file_mtimes
         changed_files = []
         for f, last_mtime in list(file_mtimes.items()):
@@ -66,10 +65,9 @@ def check_changes(self) -> bool:
                 if current_mtime != last_mtime:
                     file_mtimes[f] = current_mtime
                     changed_files.append(f)
-                    logging.info("Detect %s has changed", f)
+                    logging.info(f"Detect {f} has changed")
             except OSError:
                 pass
-
         if changed_files:
             if self._callback:
                 self._callback(changed_files)
@@ -89,7 +87,9 @@ class FileMonitor:
         >>> fm.start()
     """
 
-    def __init__(self, callback: Callable, files: List, interval: int = 1):
+    def __init__(
+        self, callback: Callable[[List[str]], Any], files: List, interval: int = 1
+    ):
         """Initializes FileMonitor.
Arguments: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/hec_config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/hec_config.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/log.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/log.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/checkpointer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/checkpointer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/event.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/event.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/event_writer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/event_writer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/modular_input.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/modular_input.py old mode 100755 new mode 100644 index 8096747..b2256b1 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/modular_input.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/modular_input/modular_input.py @@ -194,7 +194,7 @@ def _create_checkpointer(self): host=self.server_host, port=self.server_port, ) - except binding.HTTPError as e: + except binding.HTTPError: logging.error( "Failed to init kvstore checkpointer: %s.", traceback.format_exc() ) @@ -231,7 +231,7 @@ def _create_event_writer(self): host=self.server_host, port=self.server_port, ) - except binding.HTTPError as e: + except binding.HTTPError: logging.error( "Failed to init HECEventWriter: %s.", traceback.format_exc() ) @@ -464,7 +464,7 @@ def execute(self): self.do_run(input_definition["inputs"]) logging.info("Modular input: %s exit normally.", self.name) return 0 - except Exception as e: + except Exception: logging.error( "Modular input: %s exit with exception: %s.", self.name, diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/net_utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/net_utils.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/orphan_process_monitor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/orphan_process_monitor.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/pattern.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/pattern.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/server_info.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/server_info.py old mode 100755 new mode 100644 
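The solnlib hunks above add a module-level `conf_manager.get_log_level()` helper and realm-scoped credential accessors (`get_raw_passwords_in_realm`, `get_clear_passwords_in_realm`). A minimal usage sketch follows; the logger name, add-on name, conf file name, and session key are illustrative placeholders, not values taken from this patch:

    import logging

    from solnlib import conf_manager
    from solnlib.credentials import CredentialManager

    logger = logging.getLogger("demo_addon")    # hypothetical logger name
    session_key = "<session-key-from-splunkd>"  # obtained at runtime from Splunk
    app_name = "Splunk_TA_demo"                 # hypothetical add-on name
    conf_name = "splunk_ta_demo_settings"       # hypothetical conf file name

    # Resolve the add-on's log level from the [logging] stanza; any error
    # falls back to the default ("INFO").
    log_level = conf_manager.get_log_level(
        logger=logger,
        session_key=session_key,
        app_name=app_name,
        conf_name=conf_name,
    )
    logger.setLevel(log_level)

    # Passing a realm lets CredentialManager use the new *_in_realm accessors,
    # which filter server-side instead of listing every stored password.
    cm = CredentialManager(
        session_key,
        app_name,
        realm=f"__REST_CREDENTIAL__#{app_name}#configs/conf-{conf_name}",
    )
    for cred in cm.get_clear_passwords_in_realm():
        logger.debug("found credential for %s in realm %s", cred["username"], cred["realm"])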
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunk_rest_client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunk_rest_client.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunkenv.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunkenv.py
old mode 100755
new mode 100644
index 33184c3..2008535
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunkenv.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/splunkenv.py
@@ -24,8 +24,6 @@
 from io import StringIO
 from typing import List, Optional, Tuple, Union
 
-from . import utils
-
 __all__ = [
     "make_splunkhome_path",
     "get_splunk_host_info",
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/time_parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/time_parser.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/timer_queue.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/timer_queue.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/user_access.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/user_access.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/utils.py
old mode 100755
new mode 100644
index f2e684c..fda4264
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/utils.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib/utils.py
@@ -33,9 +33,34 @@
     "is_false",
     "retry",
     "extract_http_scheme_host_port",
+    "remove_http_proxy_env_vars",
 ]
 
 
+def remove_http_proxy_env_vars() -> None:
+    """Removes HTTP(s) proxies from environment variables.
+
+    Removes the following environment variables:
+    * http_proxy
+    * https_proxy
+    * HTTP_PROXY
+    * HTTPS_PROXY
+
+    This function can be used in Splunk modular inputs code before starting the
+    ingestion to ensure that no proxy is going to be used when doing requests.
+    In case a proxy is needed, it can be defined in the modular inputs code.
+    """
+    env_vars_to_remove = (
+        "http_proxy",
+        "https_proxy",
+        "HTTP_PROXY",
+        "HTTPS_PROXY",
+    )
+    for env_var in env_vars_to_remove:
+        if env_var in os.environ:
+            del os.environ[env_var]
+
+
 def handle_teardown_signals(callback: Callable):
     """Register handler for SIGTERM/SIGINT/SIGBREAK signal.
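The new `remove_http_proxy_env_vars()` above simply deletes the four proxy variables from `os.environ`. A short sketch of calling it at the start of a modular input, before any outbound requests are made; the proxy URL here is a made-up example:

    import os

    from solnlib import utils

    os.environ["HTTPS_PROXY"] = "http://proxy.example.com:8080"  # simulate an inherited proxy

    # Drop http_proxy/https_proxy/HTTP_PROXY/HTTPS_PROXY so collection
    # traffic from this process bypasses any inherited proxy settings.
    utils.remove_http_proxy_env_vars()

    assert "HTTPS_PROXY" not in os.environ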
@@ -137,7 +162,7 @@ def wrapper(*args, **kwargs): ): last_ex = e if i < max_tries - 1: - time.sleep(2 ** i) + time.sleep(2**i) else: raise diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sorteddict.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sorteddict.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sortedlist.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sortedlist.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sortedset.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/sortedcontainers/sortedset.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_aoblib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_aoblib/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_aoblib/utility.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_aoblib/utility.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/requests-2.26.0.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/METADATA similarity index 91% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/METADATA index 95aba7d..d478c5b 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: splunk-sdk -Version: 1.6.16 +Version: 1.6.18 Summary: The Splunk Software Development Kit for Python. Home-page: http://github.com/splunk/splunk-sdk-python Author: Splunk, Inc. 
@@ -8,7 +8,7 @@ Author-email: devinfo@splunk.com License: http://www.apache.org/licenses/LICENSE-2.0 Platform: UNKNOWN Classifier: Programming Language :: Python -Classifier: Development Status :: 3 - Alpha +Classifier: Development Status :: 6 - Mature Classifier: Environment :: Other Environment Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: Apache Software License diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/RECORD similarity index 64% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/RECORD rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/RECORD index ce272a1..ca2fc00 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/RECORD @@ -1,17 +1,17 @@ -splunk_sdk-1.6.16.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -splunk_sdk-1.6.16.dist-info/METADATA,sha256=YvUn4EJg0J9m_UCjnPgfSLA96trlkTzHvOXRmAZ7ZFg,740 -splunk_sdk-1.6.16.dist-info/RECORD,, -splunk_sdk-1.6.16.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -splunk_sdk-1.6.16.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 -splunk_sdk-1.6.16.dist-info/top_level.txt,sha256=taq2YYfOB31tNefwW73s54xytiB_sy8bpbyBAfn0kr8,10 -splunklib/__init__.py,sha256=izuaantnftizId8OM0z5tGWaaRWHrV2oSLBs7o5v2ak,772 -splunklib/binding.py,sha256=mVIVCPBGPaif1TZerYg5y8knjEiCxocF66U_UNUerec,58046 -splunklib/client.py,sha256=O0HWJdXsUKmOV1pRZbkF2EJXxTlBeFF4KVCKu-TZuwI,143072 +splunk_sdk-1.6.18.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +splunk_sdk-1.6.18.dist-info/METADATA,sha256=5ED-1H2wwnFJCt_D8WCmTPPXsqka2En6ECHtGBV20x0,741 +splunk_sdk-1.6.18.dist-info/RECORD,, +splunk_sdk-1.6.18.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +splunk_sdk-1.6.18.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +splunk_sdk-1.6.18.dist-info/top_level.txt,sha256=taq2YYfOB31tNefwW73s54xytiB_sy8bpbyBAfn0kr8,10 +splunklib/__init__.py,sha256=1J8iSAeiM_2xN9uSvFnqTT9krVVB0-8gUZvxLGl8QbA,772 +splunklib/binding.py,sha256=ui61YzW0zKpYjUrdaqyyHM_mhshvyRaH5_mGbKLY8zc,58384 +splunklib/client.py,sha256=BuQ4AQguuvPnyH4t52M4zjZGBOf2Bn8TgBXJ9RX2euk,144291 splunklib/data.py,sha256=LUxwBhhrAFhRMhwiVEew-JDYHQAGcoWn2lilZjZHUFQ,8528 splunklib/modularinput/__init__.py,sha256=bHyI0p3GtBoAjbFCEbuUibEFKwnHDyZzZOwS4Fvt0zs,400 splunklib/modularinput/argument.py,sha256=OGYDCW1Fw7PsMygxetwzTH2YGm1fdzErZXxUYyB-WNU,4219 splunklib/modularinput/event.py,sha256=xV-n38M4xFU8OB3vcT_u8cllWGBo5BHGWNYXTcfJOIk,4456 -splunklib/modularinput/event_writer.py,sha256=F8mcZg3WWIyY8qyqFoAS-TWPztBjUiRtsrfVVrUetBU,2885 +splunklib/modularinput/event_writer.py,sha256=d13bzKcJYawN1Q-S2iuHROl2qheaVAiuCTW2wEeKaZE,2844 splunklib/modularinput/input_definition.py,sha256=RE_QnfBLXA2JhdEshvclVzTBKcppY_ktQ0lOAlGUPNc,1888 splunklib/modularinput/scheme.py,sha256=wllFtEnV90MY9FNrPR3OoIhdzVU4blaF7ONaEcL3ekQ,3073 splunklib/modularinput/script.py,sha256=YZM7UKIFuLkkNldlGAtk8HfbwzZ11uev0Ig8vsMVGWo,6597 @@ -24,10 +24,10 @@ splunklib/searchcommands/decorators.py,sha256=ERS8HP8PsGV4BCu9KgsxkC6UKwwIBFjGDW 
splunklib/searchcommands/environment.py,sha256=mX52tmJigBDnguIrzvzMHFu9XysDkGi6WNl5GuUKKTs,4683 splunklib/searchcommands/eventing_command.py,sha256=Rxy88uB95xFQ2Gq4U7e45Roc-zGRM1X2zdfIIbRNh-I,5432 splunklib/searchcommands/external_search_command.py,sha256=ml-_4qMBZx_M0OYiCevAdpRZBLG_ih7pTRsmElbn2a0,7872 -splunklib/searchcommands/generating_command.py,sha256=jWx5VQ-GO9bCjnFcnf4LT1AtVNQdGoyp3KqUdGiWyXg,17670 -splunklib/searchcommands/internals.py,sha256=UzMF1sarZUZ9Tatk0Jp7JMiOhrM273ChJVbdyvnHYYE,28748 +splunklib/searchcommands/generating_command.py,sha256=93xwHtQIRUIlxdG2uhJvxk6omp4SELefM8wqsfKpXNE,18949 +splunklib/searchcommands/internals.py,sha256=agV_877lxxhu4jHrOMv_LE4wCMxJ8dckIejYzOeVG1E,28913 splunklib/searchcommands/reporting_command.py,sha256=QFNyGO-EQlW01BxEEw5c7JWh4IAfhm29SlAKgjftuyY,9697 -splunklib/searchcommands/search_command.py,sha256=jFh1AViNJcRTrlua9WEpKPtkv9bGHvrnO5njO7hcwUg,39250 +splunklib/searchcommands/search_command.py,sha256=_pPp_9GJZzgQddAzTpASbDBGitErgUH1Xc4vV7GeueU,40290 splunklib/searchcommands/streaming_command.py,sha256=qkVpXsNKQSWoTE1ZLKJvK7aPFHQydPFj9TP_x7wLntc,6778 splunklib/searchcommands/validators.py,sha256=gXlH1x9Xr0BX2gSqGF6M9mP5ntrRAn3xsIKP_BxXT6k,11877 splunklib/six.py,sha256=Q6WvEXZ1DGEASAo3CGNCJkKv2tPy8xkSmK-VHE9PYIA,34074 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/WHEEL new file mode 100644 index 0000000..1f37c02 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.18.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/__init__.py old mode 100755 new mode 100644 index 525dc8e..41c261f --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/__init__.py @@ -16,5 +16,5 @@ from __future__ import absolute_import from splunklib.six.moves import map -__version_info__ = (1, 6, 16) +__version_info__ = (1, 6, 18) __version__ = ".".join(map(str, __version_info__)) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/binding.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/binding.py old mode 100755 new mode 100644 index c3121fb..94cc558 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/binding.py +++ 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/binding.py @@ -30,7 +30,6 @@ import logging import socket import ssl -import sys from base64 import b64encode from contextlib import contextmanager from datetime import datetime @@ -39,7 +38,6 @@ from xml.etree.ElementTree import XML from splunklib import six -from splunklib.six import StringIO from splunklib.six.moves import urllib from .data import record @@ -471,7 +469,7 @@ class Context(object): """ def __init__(self, handler=None, **kwargs): self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"), - cert_file=kwargs.get("cert_file")) # Default to False for backward compat + cert_file=kwargs.get("cert_file"), context=kwargs.get("context")) # Default to False for backward compat self.token = kwargs.get("token", _NoAuthenticationToken) if self.token is None: # In case someone explicitly passes token=None self.token = _NoAuthenticationToken @@ -1070,7 +1068,7 @@ def __init__(self, message, cause): # # Encode the given kwargs as a query string. This wrapper will also _encode -# a list value as a sequence of assignemnts to the corresponding arg name, +# a list value as a sequence of assignments to the corresponding arg name, # for example an argument such as 'foo=[1,2,3]' will be encoded as # 'foo=1&foo=2&foo=3'. def _encode(**kwargs): @@ -1137,9 +1135,9 @@ class HttpLib(object): If using the default handler, SSL verification can be disabled by passing verify=False. """ - def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None): + def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None, context=None): if custom_handler is None: - self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file) + self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file, context=context) else: self.handler = custom_handler self._cookies = {} @@ -1351,7 +1349,7 @@ def readinto(self, byte_array): return bytes_read -def handler(key_file=None, cert_file=None, timeout=None, verify=False): +def handler(key_file=None, cert_file=None, timeout=None, verify=False, context=None): """This class returns an instance of the default HTTP request handler using the values you provide. @@ -1363,6 +1361,8 @@ def handler(key_file=None, cert_file=None, timeout=None, verify=False): :type timeout: ``integer`` or "None" :param `verify`: Set to False to disable SSL verification on https connections. 
:type verify: ``Boolean`` +    :param `context`: The SSLContext that is used with the HTTPSConnection when verify=True is enabled and a context is specified +    :type context: ``SSLContext`` """ def connect(scheme, host, port): @@ -1376,6 +1376,10 @@ def connect(scheme, host, port): if not verify: kwargs['context'] = ssl._create_unverified_context() +            elif context: +                # verify is True in elif branch and context is not None +                kwargs['context'] = context + return six.moves.http_client.HTTPSConnection(host, port, **kwargs) raise ValueError("unsupported scheme: %s" % scheme) @@ -1385,7 +1389,7 @@ def request(url, message, **kwargs): head = { "Content-Length": str(len(body)), "Host": host, -        "User-Agent": "splunk-sdk-python/1.6.16", +        "User-Agent": "splunk-sdk-python/1.6.18", "Accept": "*/*", "Connection": "Close", } # defaults diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/client.py old mode 100755 new mode 100644 index 39b1dcc..21d27a6 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/client.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/client.py @@ -295,7 +295,7 @@ def connect(**kwargs): :type port: ``integer`` :param scheme: The scheme for accessing the service (the default is "https"). :type scheme: "https" or "http" -    :param verify: Enable (True) or disable (False) SSL verrification for +    :param verify: Enable (True) or disable (False) SSL verification for https connections. (optional, the default is True) :type verify: ``Boolean`` :param `owner`: The owner context of the namespace (optional). @@ -318,6 +318,8 @@ def connect(**kwargs): :type username: ``string`` :param `password`: The password for the Splunk account. :type password: ``string`` +    :param `context`: The SSLContext that can be used when setting verify=True (optional) +    :type context: ``SSLContext`` :return: An initialized :class:`Service` connection. **Example**:: @@ -365,7 +367,7 @@ class Service(_BaseService): :type port: ``integer`` :param scheme: The scheme for accessing the service (the default is "https"). :type scheme: "https" or "http" -    :param verify: Enable (True) or disable (False) SSL verrification for +    :param verify: Enable (True) or disable (False) SSL verification for https connections. (optional, the default is True) :type verify: ``Boolean`` :param `owner`: The owner context of the namespace (optional; use "-" for wildcard). @@ -401,6 +403,7 @@ class Service(_BaseService): def __init__(self, **kwargs): super(Service, self).__init__(**kwargs) self._splunk_version = None +        self._kvstore_owner = None @property def apps(self): @@ -673,12 +676,34 @@ def splunk_version(self): self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')]) return self._splunk_version +    @property +    def kvstore_owner(self): +        """Returns the KVStore owner for this instance of Splunk. + +        By default, if the kvstore owner is not set, it will return "nobody" +        :return: A string with the KVStore owner. +        """ +        if self._kvstore_owner is None: +            self._kvstore_owner = "nobody" +        return self._kvstore_owner + +    @kvstore_owner.setter +    def kvstore_owner(self, value): +        """ +        kvstore is refreshed when the owner value is changed +        """ +        self._kvstore_owner = value +        self.kvstore +
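The binding.py hunks above thread an optional `context` keyword from Context and HttpLib down to handler(), which hands a caller-supplied ssl.SSLContext to HTTPSConnection when verify=True; client.py grows a matching connect() parameter and a kvstore_owner property defaulting to "nobody". A minimal usage sketch, not part of the patch; the endpoint, credentials, and CA bundle path are placeholders:

    import ssl
    import splunklib.client as client

    # A caller-supplied SSLContext is honored only when verify=True;
    # verify=False still falls back to an unverified context.
    ctx = ssl.create_default_context(cafile="/path/to/ca.pem")  # placeholder CA bundle
    service = client.connect(
        host="localhost", port=8089,            # placeholder splunkd endpoint
        username="admin", password="changeme",  # placeholder credentials
        verify=True, context=ctx,
    )
    service.kvstore_owner = "nobody"  # default owner; the setter also refreshes kvstore

    @property     def kvstore(self):         """Returns the collection of KV Store collections.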
+        Sets the owner for the namespace before retrieving the KVStore Collection +         :return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities. """ +        self.namespace['owner'] = self.kvstore_owner return KVStoreCollections(self) @property @@ -856,7 +881,7 @@ class Entity(Endpoint): ent.whitelist However, because some of the field names are not valid Python identifiers, -    the dictionary-like syntax is preferrable. +    the dictionary-like syntax is preferable. The state of an :class:`Entity` object is cached, so accessing a field does not contact the server. If you think the values on the @@ -3619,7 +3644,7 @@ def __init__(self, collection): self.service = collection.service self.collection = collection self.owner, self.app, self.sharing = collection._proper_namespace() -        self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/' +        self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name, encode_slash=True) + '/' def _get(self, url, **kwargs): return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs) @@ -3640,6 +3665,11 @@ def query(self, **query): :return: Array of documents retrieved by query. :rtype: ``array`` """ + +        for key, value in query.items(): +            if isinstance(query[key], dict): +                query[key] = json.dumps(value) + return json.loads(self._get('', **query).body.read().decode('utf-8')) def query_by_id(self, id): @@ -3652,7 +3682,7 @@ def query_by_id(self, id): :return: Document with id :rtype: ``dict`` """ -        return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8')) +        return json.loads(self._get(UrlEncoded(str(id), encode_slash=True)).body.read().decode('utf-8')) def insert(self, data): """ @@ -3664,6 +3694,8 @@ def insert(self, data): :return: _id of inserted object :rtype: ``dict`` """ +        if isinstance(data, dict): +            data = json.dumps(data) return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) def delete(self, query=None): @@ -3686,7 +3718,7 @@ def delete_by_id(self, id): :return: Result of DELETE request """ -        return self._delete(UrlEncoded(str(id))) +        return self._delete(UrlEncoded(str(id), encode_slash=True)) def update(self, id, data): """ @@ -3700,7 +3732,9 @@ def update(self, id, data): :return: id of replaced document :rtype: ``dict`` """ -        return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) +        if isinstance(data, dict): +            data = json.dumps(data) +        return json.loads(self._post(UrlEncoded(str(id), encode_slash=True), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) def batch_find(self, *dbqueries): """ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/data.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/data.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/argument.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/argument.py old mode 100755 new mode 100644
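The KVStoreCollectionData hunks above make insert(), update(), and query() accept plain dicts, JSON-encoding them internally, and URL-encode document ids with encode_slash=True so keys containing '/' round-trip safely. An illustrative sketch against the patched client; the connection details and the collection name "mycollection" are placeholders, and the collection is assumed to already exist:

    import splunklib.client as client

    service = client.connect(host="localhost", port=8089,
                             username="admin", password="changeme")  # placeholders
    data = service.kvstore["mycollection"].data  # hypothetical collection

    # dicts are now accepted directly; json.dumps() is applied internally
    key = data.insert({"name": "test", "value": 1})["_key"]
    data.update(key, {"name": "test", "value": 2})

    # dict-valued query parameters are JSON-encoded before the GET
    docs = data.query(query={"name": "test"})

    # ids pass through UrlEncoded(..., encode_slash=True)
    data.delete_by_id(key)

diff --git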
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event_writer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event_writer.py old mode 100755 new mode 100644 index 3e43210..b868a18 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event_writer.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/event_writer.py @@ -15,7 +15,6 @@ from __future__ import absolute_import import sys -from io import TextIOWrapper, TextIOBase from splunklib.six import ensure_str from .event import ET diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/input_definition.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/input_definition.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/scheme.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/scheme.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/script.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/script.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/utils.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/utils.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/validation_definition.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/modularinput/validation_definition.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/ordereddict.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/ordereddict.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/results.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/results.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/decorators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/decorators.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/environment.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/environment.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/eventing_command.py 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/eventing_command.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/external_search_command.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/external_search_command.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/generating_command.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/generating_command.py old mode 100755 new mode 100644 index 724d45d..6a75d2c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/generating_command.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/generating_command.py @@ -15,6 +15,7 @@ # under the License. from __future__ import absolute_import, division, print_function, unicode_literals +import sys from .decorators import ConfigurationSetting from .search_command import SearchCommand @@ -212,13 +213,49 @@ def _execute(self, ifile, process): def _execute_chunk_v2(self, process, chunk): count = 0 +        records = [] for row in process: -            self._record_writer.write_record(row) +            records.append(row) count += 1 if count == self._record_writer._maxresultrows: -                self._finished = False -                return -        self._finished = True +                break + +        for row in records: +            self._record_writer.write_record(row) + +        if count == self._record_writer._maxresultrows: +            self._finished = False +        else: +            self._finished = True + +    def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True): +        """ Process data. + +        :param argv: Command line arguments. +        :type argv: list or tuple + +        :param ifile: Input data file. +        :type ifile: file + +        :param ofile: Output data file. +        :type ofile: file + +        :param allow_empty_input: For generating commands, it must be True; passing False raises a ValueError.
+        :type allow_empty_input: bool + +        :return: :const:`None` +        :rtype: NoneType + +        """ + +        # Generating commands are expected to run on an empty set of inputs, since they are the first command in a search. +        # This class also implements its own _execute_chunk_v2 method, which does not respect allow_empty_input, +        # so ensure that allow_empty_input is always True + +        if not allow_empty_input: +            raise ValueError("allow_empty_input cannot be False for Generating Commands") +        else: +            return super(GeneratingCommand, self).process(argv=argv, ifile=ifile, ofile=ofile, allow_empty_input=True) # endregion diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/internals.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/internals.py old mode 100755 new mode 100644 index 85f9e0f..fa32f0b --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/internals.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/internals.py @@ -508,6 +508,7 @@ def __init__(self, ofile, maxresultrows=None): self._chunk_count = 0 self._pending_record_count = 0 self._committed_record_count = 0 +        self.custom_fields = set() @property def is_flushed(self): @@ -572,6 +573,7 @@ def write_record(self, record): def write_records(self, records): self._ensure_validity() +        records = list(records) write_record = self._write_record for record in records: write_record(record) @@ -593,6 +595,7 @@ def _write_record(self, record): if fieldnames is None: self._fieldnames = fieldnames = list(record.keys()) +            self._fieldnames.extend([i for i in self.custom_fields if i not in self._fieldnames]) value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames) self._writerow(list(chain.from_iterable(value_list))) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/reporting_command.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/reporting_command.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/search_command.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/search_command.py old mode 100755 new mode 100644 index 7383a5e..5a626cc --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/search_command.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/search_command.py @@ -124,6 +124,7 @@ def __init__(self): self._default_logging_level = self._logger.level self._record_writer = None self._records = None +        self._allow_empty_input = True def __str__(self): text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames)) @@ -172,6 +173,14 @@ def logging_level(self, value): raise ValueError('Unrecognized logging level: {}'.format(value)) self._logger.setLevel(level) +    def add_field(self, current_record, field_name, field_value): +        self._record_writer.custom_fields.add(field_name) +        current_record[field_name] = field_value + +    def gen_record(self, **record): +        self._record_writer.custom_fields |= set(record.keys()) +        return record + record = Option(doc=''' **Syntax: record= @@ -413,7 +422,7 @@ def prepare(self): """ pass -    def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout): +    def process(self,
argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True): """ Process data. :param argv: Command line arguments. @@ -425,10 +434,16 @@ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout): :param ofile: Output data file. :type ofile: file +        :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read +        :type allow_empty_input: bool + :return: :const:`None` :rtype: NoneType """ + +        self._allow_empty_input = allow_empty_input + if len(argv) > 1: self._process_protocol_v1(argv, ifile, ofile) else: @@ -965,13 +980,14 @@ def _execute_v2(self, ifile, process): def _execute_chunk_v2(self, process, chunk): metadata, body = chunk -        if len(body) <= 0: -            return +        if len(body) <= 0 and not self._allow_empty_input: +            raise ValueError( +                "No records found to process. Set allow_empty_input=True in dispatch function to move forward " +                "with empty records.") records = self._read_csv_records(StringIO(body)) self._record_writer.write_records(process(records)) -     def _report_unexpected_error(self): error_type, error, tb = sys.exc_info() @@ -1063,8 +1079,7 @@ def iteritems(self): SearchMetric = namedtuple('SearchMetric', ('elapsed_seconds', 'invocation_count', 'input_count', 'output_count')) - -def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None): +def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None, allow_empty_input=True): """ Instantiates and executes a search command class This function implements a `conditional script stanza `_ based on the value of @@ -1087,6 +1102,8 @@ def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys :type output_file: :code:`file` :param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring` +    :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read +    :type allow_empty_input: bool :returns: :const:`None` **Example** @@ -1124,4 +1141,4 @@ def stream(records): assert issubclass(command_class, SearchCommand) if module_name is None or module_name == '__main__': -        command_class().process(argv, input_file, output_file) +        command_class().process(argv, input_file, output_file, allow_empty_input) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/streaming_command.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/streaming_command.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/validators.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/searchcommands/validators.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/six.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunklib/six.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/LICENSE similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/LICENSE diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/METADATA similarity index 92% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/METADATA index 0c293da..650bec3 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: splunktalib -Version: 2.2.6 +Version: 3.0.0 Summary: Supporting library for Splunk Add-ons Home-page: https://github.com/splunk/addonfactory-ta-library-python License: Apache-2.0 @@ -14,6 +14,6 @@ Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Requires-Dist: defusedxml (>=0,<1) -Requires-Dist: httplib2 (>=0,<1) +Requires-Dist: requests (>=2.26.0,<3.0.0) Requires-Dist: sortedcontainers (>=2,<3) Project-URL: Repository, https://github.com/splunk/addonfactory-ta-library-python
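Pulling together the splunklib.searchcommands changes above: add_field() and gen_record() register names in the record writer's custom_fields so they always appear in the output header, and allow_empty_input flows from dispatch() through process() to _execute_chunk_v2(). A sketch of a custom command exercising both, not part of the patch; the command and field names are hypothetical:

    #!/usr/bin/env python
    import sys
    from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

    @Configuration()
    class TagCommand(StreamingCommand):
        def stream(self, records):
            for record in records:
                # add_field() records "tagged" in custom_fields, so the header
                # row includes it even if later chunks lack the field
                self.add_field(record, "tagged", "yes")
                yield record

    # With allow_empty_input=False, an empty chunk body now raises ValueError
    # instead of being silently skipped (GeneratingCommand still forces True).
    dispatch(TagCommand, sys.argv, sys.stdin, sys.stdout, __name__,
             allow_empty_input=False)

diff --git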
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/RECORD similarity index 70% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/RECORD rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/RECORD index f57792a..79fd091 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/RECORD @@ -1,15 +1,15 @@ -splunktalib-2.2.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -splunktalib-2.2.6.dist-info/LICENSE,sha256=UOQ5YRO6vOLTeTqm5ezsxIYeUglieyiz-4ligAVIfZI,11342 -splunktalib-2.2.6.dist-info/METADATA,sha256=Wq2ce2ONKZZsruWRORW0TX27QkHa7wMMQtgdqbtq5Lo,783 -splunktalib-2.2.6.dist-info/RECORD,, -splunktalib-2.2.6.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83 -splunktalib/__init__.py,sha256=OYACzU4jahmwdX0RPPPQK81SP7PO_Gr3vjmel3wkj-8,598 +splunktalib-3.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +splunktalib-3.0.0.dist-info/LICENSE,sha256=UOQ5YRO6vOLTeTqm5ezsxIYeUglieyiz-4ligAVIfZI,11342 +splunktalib-3.0.0.dist-info/METADATA,sha256=ZKOMY-iqSy1niaJHf5eAFmjYmkRBE6dQ7Goi_vjVJAc,792 +splunktalib-3.0.0.dist-info/RECORD,, +splunktalib-3.0.0.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83 +splunktalib/__init__.py,sha256=UROe58MAOBIq3MxqcYrDaoOjGz22osZEdz1q3YCzNTY,598 splunktalib/common/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575 splunktalib/common/consts.py,sha256=pb1lKSam8tVeh6yIH6k_XN6eFZO7oSihujlPPK4SKaU,594 splunktalib/common/log.py,sha256=JWc-7bUbgIfukwpc9nzQYGpeQJ9WCJSvBVoVQGwadxc,4951 splunktalib/common/pattern.py,sha256=_jgi1rgtcaoSttZTE6pVL37LG7oTXj_N1SnV3cHhNi4,1599 splunktalib/common/util.py,sha256=HlzvppTbTWoQFGtFLEknwkWRzb7WelfQUCvnCBrMZj8,3922 -splunktalib/common/xml_dom_parser.py,sha256=DqiYDpnZnd8VNoriUJfvGLBiJpDsQi-z2SyMXLJ_tS0,2297 +splunktalib/common/xml_dom_parser.py,sha256=V9bo6l0XAr2aDCZqhq4rNtAscKLhlDcXaWE2SElk05M,2251 splunktalib/concurrent/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575 splunktalib/concurrent/concurrent_executor.py,sha256=alyEw1ft7EfaQRwD-aVCDUDPpRsvhme1ShPi-YxFvQI,3497 splunktalib/concurrent/process_pool.py,sha256=c1LnOMe9lDd4urpkA8_y9V6Bp9mtC7rxvk_E1E_LJVM,2247 @@ -19,20 +19,20 @@ splunktalib/conf_manager/conf_endpoints.py,sha256=oMG8IgJJE8IKwr273MK9mifYdkP8eC splunktalib/conf_manager/conf_manager.py,sha256=jQy81zS7O62qlG8iWH-l90pez1w7NNctYw5rqpLc2pk,9206 splunktalib/conf_manager/data_input_endpoints.py,sha256=3PmWnBJRq9ZRuKdxsXz8R_094VtSxlUnsyXmQCmM4Q8,7195 splunktalib/conf_manager/property_endpoints.py,sha256=lDfpAhJ1ozZuUPXkpObJYyO8AsWy2luc6XL4MxPrfK0,3467 -splunktalib/conf_manager/request.py,sha256=tmoxrIxAA83YhEm3llaBYCk7Uib9gGzTfl8RWz1Aot0,1817 +splunktalib/conf_manager/request.py,sha256=Xnff88Eajm2YfiSrnR36tFKWKB-ix8Svb4LYsSNCkIE,1796 splunktalib/conf_manager/ta_conf_manager.py,sha256=AKznVbir-6TR4d4U_PqTubFPREmJBtnv1gm2soQ6M9c,6892 -splunktalib/credentials.py,sha256=O9cXDmPlPL-T4GhFgoZTvovBjXOTUjxTzpMDvXctrNc,12999 +splunktalib/credentials.py,sha256=dZLK89ruZ2RhVZVUalKZyq6lhIcr4z4YlwYSG5NCEF4,12871 splunktalib/event_writer.py,sha256=tRsMpiFB8NV737cr0pEMHcvzTNZdnj8Y-b09iChjBH0,5830 
splunktalib/file_monitor.py,sha256=RtAZWD_TKJR97tMPD-QVKFI0rKfmMdnzXWWjOKxKdKk,2358 -splunktalib/kv_client.py,sha256=7YL9TMK03mMYNSov9k8RwbwpcXQsIYNxgPhT3jogTNg,7478 -splunktalib/modinput.py,sha256=dO9YnbtFVCrpFA5xMNumKutwWK9zBoKkEnaAFVNXI6A,5215 +splunktalib/kv_client.py,sha256=Izl8ETaRxj_Z3ZZu5vCyyvDenU9r9rrdOk3zFP9F28M,7444 +splunktalib/modinput.py,sha256=VMJyJQgXYKB5HuWvzH3bVWqVbndHxe76gb6yrDIzxbY,5207 splunktalib/orphan_process_monitor.py,sha256=C6hAEMUkX8saNlzKzTQxiYNc-0LdpDseQ4j21WxRo9s,2586 -splunktalib/rest.py,sha256=eCGT28ZHeqz0Qjd41WPCVjc2xgEQyiFPKH4bgsxFl5o,5177 +splunktalib/rest.py,sha256=kNKt3KdcXPPCZIw5ihV_M9cgUkvZxMt_9SrX1BI0x9s,3000 splunktalib/schedule/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575 splunktalib/schedule/job.py,sha256=P2UYJxyQE8ZUe2eu2yG4-PdLH-r30qrjmmdJxy0zV6s,3065 splunktalib/schedule/scheduler.py,sha256=sRPJ-wgPXfKOIBFDfbPVAh2ySVtta3iqWo5vjpb5GSY,4637 splunktalib/setting.conf,sha256=eSsEk9cwPaV8MUQN63aN02AYPNShHAp5pcN5vCGyan0,667 -splunktalib/splunk_cluster.py,sha256=vkTEL_K_aVT0ifd_qQVIvYil6eec9Vc3NtE6g93k6sE,2338 +splunktalib/splunk_cluster.py,sha256=CxxbOHgrKujb5Ipre8KL58L3Nrz4HdOdfyfrRdQD6tg,2336 splunktalib/splunk_platform.py,sha256=5kR86y1sHstF-eCw8-zPr-KLvYQh38bXtfWDI6-dbFI,1906 splunktalib/state_store.py,sha256=-PRd644OXc3JsV-gxKbJizcT25LNah7jBIVxLrvEg_M,9147 splunktalib/timer.py,sha256=DQy4LiLRzIHu74jpTNu9m4nc6PG7-6ErHvt0oRmF5VY,2617 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/WHEEL similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-3.0.0.dist-info/WHEEL diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/__init__.py old mode 100755 new mode 100644 index cbad079..4d4b1b7 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/__init__.py @@ -14,4 +14,4 @@ # limitations under the License. 
# -__version__ = "2.2.6" +__version__ = "3.0.0" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/consts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/consts.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/log.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/log.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/pattern.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/pattern.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/xml_dom_parser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/xml_dom_parser.py old mode 100755 new mode 100644 index 74df077..974b9e8 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/xml_dom_parser.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/common/xml_dom_parser.py @@ -23,7 +23,6 @@ def parse_conf_xml_dom(xml_content): """ @xml_content: XML DOM from splunkd """ - xml_content = xml_content.decode("utf-8") m = re.search(r'xmlns="([^"]+)"', xml_content) ns = m.group(1) m = re.search(r'xmlns:s="([^"]+)"', xml_content) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/concurrent_executor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/concurrent_executor.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/process_pool.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/process_pool.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/thread_pool.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/concurrent/thread_pool.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/conf_endpoints.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/conf_endpoints.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/conf_manager.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/conf_manager.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/data_input_endpoints.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/data_input_endpoints.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/property_endpoints.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/property_endpoints.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/request.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/request.py old mode 100755 new mode 100644 index 1185cd0..c1bc3fb --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/request.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/request.py @@ -36,30 +36,28 @@ def content_request(uri, session_key, method, payload, err_msg): ConfRequestException """ - resp, content = rest.splunkd_request( - uri, session_key, method, data=payload, retry=3 - ) - if resp is None and content is None: + resp = rest.splunkd_request(uri, session_key, method, data=payload, retry=3) + if resp is None: return None - if resp.status >= 200 and resp.status <= 204: - return content + if resp.status_code >= 200 and resp.status_code <= 204: + return resp.text else: msg = "{}, status={}, reason={}, detail={}".format( err_msg, - resp.status, + resp.status_code, resp.reason, - content.decode("utf-8"), + resp.text, ) - if not (method == "GET" and resp.status == 404): + if not (method == "GET" and resp.status_code == 404): log.logger.error(msg) - if resp.status == 404: + if resp.status_code == 404: raise ConfNotExistsException(msg) - if resp.status == 409: + if resp.status_code == 409: raise ConfExistsException(msg) else: - if content and "already exists" in content: + if resp.text and "already exists" in resp.text: raise ConfExistsException(msg) raise ConfRequestException(msg) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/ta_conf_manager.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/conf_manager/ta_conf_manager.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/credentials.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/credentials.py old mode 100755 new mode 100644 index 13e742e..5985bfd --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/credentials.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/credentials.py @@ -104,14 +104,12 @@ def get_session_key(username, password, splunkd_uri="https://localhost:8089"): "password": password, } - response, content = rest.splunkd_request( - eid, None, method="POST", data=postargs - ) + response = rest.splunkd_request(eid, None, method="POST", data=postargs) - if response is None and content is None: + if response is None: raise CredException("Get session key failed.") - xml_obj = xdm.parseString(content) + xml_obj = 
xdm.parseString(response.text) session_nodes = xml_obj.getElementsByTagName("sessionKey") if not session_nodes: raise CredException("Invalid username or password.") @@ -165,13 +163,13 @@ def _do_update(self, name, password): except CredException: payload = {"password": password} endpoint = self._get_endpoint(name) - response, _ = rest.splunkd_request( + response = rest.splunkd_request( endpoint, self._session_key, method="POST", data=payload ) - if not response or response.status not in (200, 201): + if not response or response.status_code not in (200, 201): raise CredException( "Unable to update password for username={}, status={}".format( - name, response.status + name, response.status_code ) ) @@ -188,10 +186,10 @@ def _create(self, name, str_to_encrypt): } endpoint = self._get_endpoint(name) - resp, content = rest.splunkd_request( + resp = rest.splunkd_request( endpoint, self._session_key, method="POST", data=payload ) - if not resp or resp.status not in (200, 201, "200", "201"): + if not resp or resp.status_code not in (200, 201): raise CredException("Failed to encrypt username {}".format(name)) def delete(self, name, throw=False): @@ -236,14 +234,12 @@ def _delete(self, name, throw=False): """ endpoint = self._get_endpoint(name) - response, content = rest.splunkd_request( - endpoint, self._session_key, method="DELETE" - ) + response = rest.splunkd_request(endpoint, self._session_key, method="DELETE") - if response is not None and response.status in (404, "404"): + if response is not None and response.status_code == 404: if throw: raise CredNotFound("Credential stanza not exits - {}".format(name)) - elif not response or response.status not in (200, 201, "200", "201"): + elif not response or response.status_code not in (200, 201): if throw: raise CredException( "Failed to delete credential stanza {}".format(name) @@ -312,11 +308,9 @@ def _get_all_passwords(self): """ endpoint = self._get_endpoint() - response, content = rest.splunkd_request( - endpoint, self._session_key, method="GET" - ) - if response and response.status in (200, 201, "200", "201") and content: - return xdp.parse_conf_xml_dom(content) + response = rest.splunkd_request(endpoint, self._session_key, method="GET") + if response and response.status_code in (200, 201) and response.text: + return xdp.parse_conf_xml_dom(response.text) raise CredException("Failed to get credentials") def get_clear_password(self, name=None): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/event_writer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/event_writer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/file_monitor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/file_monitor.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/kv_client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/kv_client.py old mode 100755 new mode 100644 index 8eb2022..806f03c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/kv_client.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/kv_client.py @@ -159,17 +159,15 @@ def _do_request( ): headers = {"Content-Type": content_type} - resp, content = rest.splunkd_request( - uri, self._session_key, method, headers, data - ) - if resp is None and content is None: + resp = 
rest.splunkd_request(uri, self._session_key, method, headers, data) + if resp is None: raise KVException("Failed uri={}, data={}".format(uri, data)) - if resp.status in (200, 201): - return content - elif resp.status == 409: + if resp.status_code in (200, 201): + return resp.text + elif resp.status_code == 409: raise KVAlreadyExists("{}-{} already exists".format(uri, data)) - elif resp.status == 404: + elif resp.status_code == 404: raise KVNotExists("{}-{} not exists".format(uri, data)) else: raise KVException( diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/modinput.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/modinput.py old mode 100755 new mode 100644 index 04bfd1d..625bdc7 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/modinput.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/modinput.py @@ -64,7 +64,7 @@ def _parse_modinput_configs(root, outer_block, inner_block): confs = root.getElementsByTagName(outer_block) if not confs: log.logger.error("Invalid config, missing %s section", outer_block) - raise Exception("Invalid config, missing %s section".format(outer_block)) + raise Exception(f"Invalid config, missing {outer_block} section") configs = [] stanzas = confs[0].getElementsByTagName(inner_block) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/orphan_process_monitor.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/orphan_process_monitor.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/rest.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/rest.py old mode 100755 new mode 100644 index 4a3515a..4fb10c0 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/rest.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/rest.py @@ -15,23 +15,25 @@ # import json -import urllib.error import urllib.parse -import urllib.request from traceback import format_exc +from typing import Optional -from httplib2 import Http, ProxyInfo, socks +import requests import splunktalib.common.log as log -import splunktalib.common.util as scu def splunkd_request( - splunkd_uri, session_key, method="GET", headers=None, data=None, timeout=30, retry=1 -): - """ - :return: httplib2.Response and content - """ + splunkd_uri, + session_key, + method="GET", + headers=None, + data=None, + timeout=30, + retry=1, + verify=False, +) -> Optional[requests.Response]: headers = headers if headers is not None else {} headers["Authorization"] = "Splunk {}".format(session_key) @@ -49,104 +51,45 @@ def splunkd_request( else: data = urllib.parse.urlencode(data) - http = Http(timeout=timeout, disable_ssl_certificate_validation=True) msg_temp = "Failed to send rest request=%s, errcode=%s, reason=%s" - resp, content = None, None + resp = None for _ in range(retry): try: - resp, content = http.request( - splunkd_uri, method=method, headers=headers, body=data + resp = requests.request( + method=method, + url=splunkd_uri, + data=data, + headers=headers, + timeout=timeout, + verify=verify, ) except Exception: log.logger.error(msg_temp, splunkd_uri, "unknown", format_exc()) else: - if resp.status not in (200, 201): - if not (method == "GET" and resp.status == 404): + if resp.status_code not in (200, 201): + if not (method == "GET" and resp.status_code == 404): 
log.logger.debug( - msg_temp, splunkd_uri, resp.status, code_to_msg(resp, content) + msg_temp, splunkd_uri, resp.status_code, code_to_msg(resp) ) else: - return resp, content + return resp else: - return resp, content + return resp -def code_to_msg(resp, content): +def code_to_msg(response: requests.Response): code_msg_tbl = { - 400: "Request error. reason={}".format(content), + 400: "Request error. reason={}".format(response.text), 401: "Authentication failure, invalid access credentials.", 402: "In-use license disables this feature.", 403: "Insufficient permission.", 404: "Requested endpoint does not exist.", - 409: "Invalid operation for this endpoint. reason={}".format(content), - 500: "Unspecified internal server error. reason={}".format(content), + 409: "Invalid operation for this endpoint. reason={}".format(response.text), + 500: "Unspecified internal server error. reason={}".format(response.text), 503: ( "Feature is disabled in the configuration file. " - "reason={}".format(content) + "reason={}".format(response.text) ), } - return code_msg_tbl.get(resp.status, content) - - -def build_http_connection(config, timeout=120, disable_ssl_validation=False): - """ - :config: dict like, proxy and account information are in the following - format { - "username": xx, - "password": yy, - "proxy_url": zz, - "proxy_port": aa, - "proxy_username": bb, - "proxy_password": cc, - "proxy_type": http,http_no_tunnel,sock4,sock5, - "proxy_rdns": 0 or 1, - } - :return: Http2.Http object - """ - - proxy_type_to_code = { - "http": socks.PROXY_TYPE_HTTP, - "http_no_tunnel": socks.PROXY_TYPE_HTTP_NO_TUNNEL, - "socks4": socks.PROXY_TYPE_SOCKS4, - "socks5": socks.PROXY_TYPE_SOCKS5, - } - if config.get("proxy_type") in proxy_type_to_code: - proxy_type = proxy_type_to_code[config["proxy_type"]] - else: - proxy_type = socks.PROXY_TYPE_HTTP - - rdns = scu.is_true(config.get("proxy_rdns")) - - proxy_info = None - if config.get("proxy_url") and config.get("proxy_port"): - if config.get("proxy_username") and config.get("proxy_password"): - proxy_info = ProxyInfo( - proxy_type=proxy_type, - proxy_host=config["proxy_url"], - proxy_port=int(config["proxy_port"]), - proxy_user=config["proxy_username"], - proxy_pass=config["proxy_password"], - proxy_rdns=rdns, - ) - else: - proxy_info = ProxyInfo( - proxy_type=proxy_type, - proxy_host=config["proxy_url"], - proxy_port=int(config["proxy_port"]), - proxy_rdns=rdns, - ) - if proxy_info: - http = Http( - proxy_info=proxy_info, - timeout=timeout, - disable_ssl_certificate_validation=disable_ssl_validation, - ) - else: - http = Http( - timeout=timeout, disable_ssl_certificate_validation=disable_ssl_validation - ) - - if config.get("username") and config.get("password"): - http.add_credentials(config["username"], config["password"]) - return http + return code_msg_tbl.get(response.status_code, response.text) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/job.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/job.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/scheduler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/schedule/scheduler.py 
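With the rewrite above, splunktalib.rest.splunkd_request() returns a single requests.Response, or None once retries are exhausted, instead of the old httplib2 (response, content) pair, and build_http_connection() is removed outright. The conf_manager, credentials, kv_client, and splunk_cluster changes in this patch all follow the calling pattern sketched here; the URI and session key are placeholders:

    from splunktalib import rest

    session_key = "<session key>"  # placeholder, e.g. from credentials.get_session_key()
    resp = rest.splunkd_request(
        "https://localhost:8089/services/server/info",  # placeholder splunkd URI
        session_key,
        method="GET",
        retry=3,
    )
    if resp is None:
        pass  # request never went through; splunkd_request logs the failure itself
    elif resp.status_code in (200, 201):
        body = resp.text  # already-decoded text; no .decode("utf-8") needed
    else:
        msg = rest.code_to_msg(resp)  # maps status_code to a readable message

Note that resp.text replaces the old bytes content, which is also why parse_conf_xml_dom() above no longer calls decode("utf-8").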
old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_cluster.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_cluster.py old mode 100755 new mode 100644 index f3e8306..73e8aea --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_cluster.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_cluster.py @@ -20,14 +20,14 @@ def _do_rest(uri, session_key): - resp, content = rest.splunkd_request(uri, session_key) + resp = rest.splunkd_request(uri, session_key) if resp is None: return None - if resp.status not in (200, 201): + if resp.status_code not in (200, 201): return None - stanza_objs = xdp.parse_conf_xml_dom(content) + stanza_objs = xdp.parse_conf_xml_dom(resp.text) if not stanza_objs: return None diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_platform.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/splunk_platform.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/state_store.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/state_store.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/timer.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/timer.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/timer_queue.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib/timer_queue.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/solnlib-4.4.1.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/LICENSE similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/LICENSE diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/METADATA similarity index 79% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/METADATA index 038b212..2b35724 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: splunktaucclib -Version: 5.0.7 +Version: 
6.0.0 Summary: License: Apache-2.0 Author: rfaircloth-splunk @@ -12,6 +12,8 @@ Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 +Requires-Dist: PySocks (>=1.7.1,<2.0.0) +Requires-Dist: requests (>=2.26.0,<3.0.0) Requires-Dist: solnlib (>=4.0.0,<5.0.0) Requires-Dist: splunk-sdk (>=1.6.16,<2.0.0) -Requires-Dist: splunktalib (>=2.0.0,<3.0.0) +Requires-Dist: splunktalib (>=3.0.0,<4.0.0) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/RECORD similarity index 79% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/RECORD rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/RECORD index fdb5d66..7e57cc7 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/RECORD +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/RECORD @@ -1,15 +1,15 @@ -splunktaucclib-5.0.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -splunktaucclib-5.0.7.dist-info/LICENSE,sha256=MAoJOAr_Vegc2uyd5RyaSNkjRZL4p3ItNOwm6sg9PSc,11341 -splunktaucclib-5.0.7.dist-info/METADATA,sha256=y8w5n9SZ29AygE8j_bceDjBHz-CHcwbLsSJSEr2LI1U,618 -splunktaucclib-5.0.7.dist-info/RECORD,, -splunktaucclib-5.0.7.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -splunktaucclib-5.0.7.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83 -splunktaucclib/__init__.py,sha256=URKhlJqHPTXmUwuQyMW7RowE3x8O7ua-XELv3ZNKQvU,598 -splunktaucclib/alert_actions_base.py,sha256=As1A94AB_EPltTSGxxvHZ9v1bcBwQ4i9Hhnz2pNXzXQ,9605 +splunktaucclib-6.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +splunktaucclib-6.0.0.dist-info/LICENSE,sha256=MAoJOAr_Vegc2uyd5RyaSNkjRZL4p3ItNOwm6sg9PSc,11341 +splunktaucclib-6.0.0.dist-info/METADATA,sha256=KdCxpHt14B3p28FftNSjqS1-eIVu7lKocBIGbXvHVuo,700 +splunktaucclib-6.0.0.dist-info/RECORD,, +splunktaucclib-6.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +splunktaucclib-6.0.0.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83 +splunktaucclib/__init__.py,sha256=PQsBUcInm1Zw0M-ox2dcNO1Fql6IdZ2GzpmdDjNXrMk,598 +splunktaucclib/alert_actions_base.py,sha256=AE2sd_YWXC99sr1g4Qr8zzIvRV1eoOt-a4qFO0YMj88,6826 splunktaucclib/cim_actions.py,sha256=xcoy6R4-t96MUPp__BEwzkdJRGKTvxT6DwCVaDDlutA,24625 splunktaucclib/common/__init__.py,sha256=oM7n5derwdnifG4fpufcBhFLXu9NtLQRBRjaq5HUSSg,1625 splunktaucclib/common/log.py,sha256=oVcMTyPPwAibYIyLlgi2SgQMk-5NbpglRROFIZR2qxU,1607 -splunktaucclib/config.py,sha256=l_kNpNMuRvIPiBX0neDQozb71InqZbfxIy9v7r0AD1E,14945 +splunktaucclib/config.py,sha256=69ADcm0hg0xYUKSQwFvCzioVtB2UOOSF-OjP1mE18ME,14532 splunktaucclib/data_collection/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575 splunktaucclib/data_collection/ta_checkpoint_manager.py,sha256=VpQnY4l6JQW0Lw1q9BhahIDXS30crvcTM4fQ6Vf3xjE,2423 splunktaucclib/data_collection/ta_config.py,sha256=71oFBy364ZlhKe65mdK-8P1WghLoz63XzxukxJmUg_c,8905 @@ -26,7 +26,7 @@ splunktaucclib/modinput_wrapper/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpg 
splunktaucclib/modinput_wrapper/base_modinput.py,sha256=dTUBGt18C5gkagJ7Ivlr3f7GiovfEgSMbp5IIa-jSZs,22456 splunktaucclib/rest_handler/__init__.py,sha256=59ORDvD14I_Ksfm0HCcC6fnQLJf9KZNzQT7HXg3ZAcY,622 splunktaucclib/rest_handler/admin_external.py,sha256=zoXakN5RNO0HbtrNOstVxRRVCvRSfSbNptWKpE1iUlY,6452 -splunktaucclib/rest_handler/base.py,sha256=8JWm1SlSWjGbKin_Ii3gDCgbaS6zja_M8f5-GxL1p0o,24282 +splunktaucclib/rest_handler/base.py,sha256=_C9phmBH0rZbfiw3jmX34yB6tdVvpJV-rzLuPYBMQsU,24305 splunktaucclib/rest_handler/base_hook_mixin.py,sha256=8xlN4coLM64Ob8Gf62C3c0NA963qH08QBGG25Cg78pw,1499 splunktaucclib/rest_handler/cred_mgmt.py,sha256=veJhtfgnEkYCP5PxV6i829yqF3FlVD-GDFycBqkJXUw,5783 splunktaucclib/rest_handler/credentials.py,sha256=LdCPALBl8uy6ME4vdEa6B7p2ITP7nYjI-pv7oLoDGOQ,16635 @@ -42,10 +42,10 @@ splunktaucclib/rest_handler/error_ctl.py,sha256=mqSzKTJhYlBWy7PQ5Hq0-ZC1IrtjadC4 splunktaucclib/rest_handler/handler.py,sha256=2nwRlJymbEPta6YWfxaSsZ5OoaQZ-464dPcjmJ2yRZU,13772 splunktaucclib/rest_handler/multimodel.py,sha256=9Ztxot7nypa2_hNWa2jPFETBusJlZ-UnadxwLD-74YE,5916 splunktaucclib/rest_handler/normaliser.py,sha256=Nnf-MC3J3_WGIOCbzuEjdhlUX31Oky1TIjcFAHrPFB4,2952 -splunktaucclib/rest_handler/poster.py,sha256=j0QqhQ9QF-P_6DdM9qbbr_nOPQRSnlUZhUtbGwM5Fzk,6759 +splunktaucclib/rest_handler/poster.py,sha256=TFtX9yephUDqsd0tgg2PRmT_uYob_AvPMswkkphtWSg,6854 splunktaucclib/rest_handler/schema.py,sha256=62gIJdYOWVSnoYBLRUrsUtDsBWfDT_nwsiFD_FuBQn0,1115 -splunktaucclib/rest_handler/teardown.py,sha256=ca84z-P-rBqm8YaDC3aD7vHylqtGrw0ePUgElupjJKM,5025 -splunktaucclib/rest_handler/util.py,sha256=7PklzD_6zIQyyrl0tru25JfgW8PyK9GCa06PWip5_cg,2830 +splunktaucclib/rest_handler/teardown.py,sha256=soR9KUQvnl5gQcOsvEfVLb5b-Q-bHC2J0kNI5g4g4BY,5023 +splunktaucclib/rest_handler/util.py,sha256=OVZVXtOGBOyjbrokMT2TKTT80-ceTwnq6r7I5sJMNqs,4461 splunktaucclib/rest_handler/validator.py,sha256=otCxdaOUvdo1wBhZGFo7KHOFkeg9NU0yaqnVvuCkv5g,9160 splunktaucclib/splunk_aoblib/__init__.py,sha256=TB1W22zY5u1d91XSos8Qa2V5NMCpgLTgIiV2wT4hx3Y,575 splunktaucclib/splunk_aoblib/rest_helper.py,sha256=bPCVWnhlMl-io64XKXzTM0OpYNhtETUSPD74Awaq8Pc,2250 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/REQUESTED b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/WHEEL similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-6.0.0.dist-info/WHEEL diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/__init__.py old mode 100755 new mode 100644 index 39e83fb..04af818 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/__init__.py @@ -14,4 +14,4 @@ # limitations under the License. 
# -__version__ = "5.0.7" +__version__ = "6.0.0" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/alert_actions_base.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/alert_actions_base.py old mode 100755 new mode 100644 index 621d720..9580f0c --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/alert_actions_base.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/alert_actions_base.py @@ -22,6 +22,7 @@ from solnlib import log from splunktaucclib.cim_actions import ModularAction +from splunktaucclib.rest_handler import util from splunktaucclib.splunk_aoblib.rest_helper import TARestHelper from splunktaucclib.splunk_aoblib.setup_util import Setup_Util @@ -114,22 +115,8 @@ def get_proxy(self): return self.setup_util.get_proxy_settings() def _get_proxy_uri(self): - uri = None proxy = self.get_proxy() - if proxy and proxy.get("proxy_url") and proxy.get("proxy_type"): - uri = proxy["proxy_url"] - if proxy.get("proxy_port"): - uri = "{}:{}".format(uri, proxy.get("proxy_port")) - if proxy.get("proxy_username") and proxy.get("proxy_password"): - uri = "{}://{}:{}@{}/".format( - proxy["proxy_type"], - proxy["proxy_username"], - proxy["proxy_password"], - uri, - ) - else: - uri = "{}://{}".format(proxy["proxy_type"], uri) - return uri + return util.get_proxy_uri(proxy) def send_http_request( self, @@ -158,71 +145,10 @@ def send_http_request( ) def build_http_connection(self, config, timeout=120, disable_ssl_validation=False): - from httplib2 import Http, ProxyInfo, socks - - """ - :config: dict like, proxy and account information are in the following - format { - "username": xx, - "password": yy, - "proxy_url": zz, - "proxy_port": aa, - "proxy_username": bb, - "proxy_password": cc, - "proxy_type": http,http_no_tunnel,sock4,sock5, - "proxy_rdns": 0 or 1, - } - :return: Http2.Http object - """ - if not config: - config = {} - - proxy_type_to_code = { - "http": socks.PROXY_TYPE_HTTP, - "http_no_tunnel": socks.PROXY_TYPE_HTTP_NO_TUNNEL, - "socks4": socks.PROXY_TYPE_SOCKS4, - "socks5": socks.PROXY_TYPE_SOCKS5, - } - if config.get("proxy_type") in proxy_type_to_code: - proxy_type = proxy_type_to_code[config["proxy_type"]] - else: - proxy_type = socks.PROXY_TYPE_HTTP - - rdns = config.get("proxy_rdns") - - proxy_info = None - if config.get("proxy_url") and config.get("proxy_port"): - if config.get("proxy_username") and config.get("proxy_password"): - proxy_info = ProxyInfo( - proxy_type=proxy_type, - proxy_host=config["proxy_url"], - proxy_port=int(config["proxy_port"]), - proxy_user=config["proxy_username"], - proxy_pass=config["proxy_password"], - proxy_rdns=rdns, - ) - else: - proxy_info = ProxyInfo( - proxy_type=proxy_type, - proxy_host=config["proxy_url"], - proxy_port=int(config["proxy_port"]), - proxy_rdns=rdns, - ) - if proxy_info: - http = Http( - proxy_info=proxy_info, - timeout=timeout, - disable_ssl_certificate_validation=disable_ssl_validation, - ) - else: - http = Http( - timeout=timeout, - disable_ssl_certificate_validation=disable_ssl_validation, - ) - - if config.get("username") and config.get("password"): - http.add_credentials(config["username"], config["password"]) - return http + raise NotImplementedError( + "Replace the usage of this function to send_http_request function of same class " + "or use requests.request method" + ) def process_event(self, *args, **kwargs): raise NotImplemented() diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/cim_actions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/cim_actions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/common/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/common/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/common/log.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/common/log.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/config.py old mode 100755 new mode 100644 index 7a4f139..2e294ef --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/config.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/config.py @@ -127,22 +127,20 @@ def load(self): retries = 4 waiting_time = [1, 2, 2] for retry in range(retries): - resp, cont = splunkd_request( + resp = splunkd_request( splunkd_uri=self.make_uri(ep_id), session_key=self.session_key, data=data, retry=3, ) - if resp is None or resp.status != 200: - msg = 'Fail to load endpoint "{ep_id}" - {err}' "".format( - ep_id=ep_id, err=code_to_msg(resp, cont) if resp else cont - ) + if resp is None or resp.status_code != 200: + msg = f'Fail to load endpoint "{ep_id}" - {code_to_msg(resp)}' log(msg, level=logging.ERROR, need_tb=True) raise ConfigException(msg) try: - ret[ep_id] = self._parse_content(ep_id, cont) + ret[ep_id] = self._parse_content(ep_id, resp.text) except ConfigException as exc: log(exc, level=logging.WARNING, need_tb=True) if retry < retries - 1: @@ -206,22 +204,15 @@ def update_items( continue item_uri = self.make_uri(endpoint_id, item_name=item_name) - resp, cont = splunkd_request( + resp = splunkd_request( splunkd_uri=item_uri, session_key=self.session_key, data=post_data, method="POST", retry=3, ) - if resp is None or resp.status not in (200, 201): - msg = ( - 'Fail to update item "{item}" in endpoint "{ep_id}"' - " - {err}".format( - ep_id=endpoint_id, - item=item_name, - err=code_to_msg(resp, cont) if resp else cont, - ) - ) + if resp is None or resp.status_code not in (200, 201): + msg = f'Fail to update item "{item_name}" in endpoint "{endpoint_id}" - {code_to_msg(resp)}' log(msg, level=logging.ERROR) if raise_if_failed: raise ConfigException(msg) @@ -365,8 +356,8 @@ def load_value(self, endpoint_id, item_name, fname, fval): def try_fix_corrupted_json(self, corrupted_json, value_err): """ - When L3 (one of our customers) was testing, they encountered a bug that 'access_token_encrypted' or 'refresh_token' got corrupted when it was saved in the conf file - (because of the way how we save encrypted data). 
+ A bug was encountered that 'access_token_encrypted' or 'refresh_token' + got corrupted when it was saved in the conf file """ # value_err.message is in this format: # Extra data: line 1 column 2720 - line 1 column 2941 (char 2719 - 2940) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_checkpoint_manager.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_checkpoint_manager.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_config.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_config.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_consts.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_consts.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_client.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_client.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_collector.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_collector.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_loader.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_data_loader.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_helper.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_helper.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_mod_input.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/data_collection/ta_mod_input.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/configuration.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/configuration.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/schema.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/global_config/schema.py old mode 100755 new mode 100644 diff --git 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/modinput_wrapper/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/modinput_wrapper/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/modinput_wrapper/base_modinput.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/modinput_wrapper/base_modinput.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/admin_external.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/admin_external.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base.py old mode 100755 new mode 100644 index eb283ac..6500ce2 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base.py @@ -21,7 +21,6 @@ import itertools import json import logging -import sys from inspect import ismethod from os import path as op @@ -68,15 +67,15 @@ def user_caps(mgmt_uri, session_key): """ url = mgmt_uri + "/services/authentication/current-context" - resp, cont = splunkd_request( + resp = splunkd_request( url, session_key, method="GET", data={"output_mode": "json"}, retry=3 ) if resp is None: RH_Err.ctl(500, logging.ERROR, "Fail to get capabilities of sessioned user") - elif resp.status not in (200, "200"): - RH_Err.ctl(resp.status, logging.ERROR, cont) + elif resp.status_code != 200: + RH_Err.ctl(resp.status_code, logging.ERROR, resp.text) - cont = json.loads(cont) + cont = resp.json() caps = cont["entry"][0]["content"]["capabilities"] return set(caps) @@ -286,9 +285,9 @@ def handleACL(self, confInfo): self.handleList(confInfo) def handleCreate(self, confInfo): - try: + try: # nosemgrep: gitlab.bandit.B110 self.get(self.callerArgs.id) - except: + except Exception: pass else: RH_Err.ctl( diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base_hook_mixin.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/base_hook_mixin.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/cred_mgmt.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/cred_mgmt.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/credentials.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/credentials.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/datainput.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/datainput.py old mode 
100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/eai.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/eai.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/converter.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/converter.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/field.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/field.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/validator.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/endpoint/validator.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/entity.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/entity.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/error.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/error.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/error_ctl.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/error_ctl.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/handler.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/handler.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/multimodel.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/multimodel.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/normaliser.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/normaliser.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/poster.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/poster.py old mode 100755 new mode 100644 index b84ec7e..6e75177 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/poster.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/poster.py @@ -15,17 +15,17 @@ # -import json import re import urllib.error import urllib.parse import urllib.request +import requests from splunk import admin, rest from 
splunktalib.common.util import is_true -from splunktalib.rest import build_http_connection, code_to_msg, splunkd_request +from splunktalib.rest import code_to_msg, splunkd_request -from . import base +from . import base, util from .error_ctl import RestHandlerError as RH_Err @@ -75,7 +75,8 @@ def handleEdit(self, confInfo): app=app, ) proxy_enabled = proxy_info.get("proxy_enabled", False) - http = build_http_connection(proxy_info if proxy_enabled else {}) + proxy_uri = util.get_proxy_uri(proxy_info if proxy_enabled else {}) + proxies = {"http": proxy_uri, "https": proxy_uri} try: url = self.callerArgs.data["splunk_poster_url"][0] for regex in self.allowedURLs: @@ -97,15 +98,18 @@ def handleEdit(self, confInfo): "Content-Type": "application/x-www-form-urlencoded", } - resp, content = http.request( - url, + resp = requests.request( # nosemgrep: python.requests.best-practice.use-raise-for-status.use-raise-for-status # noqa: E501 method=method, + url=url, headers=headers, - body=urllib.parse.urlencode(payload), + data=urllib.parse.urlencode(payload), + timeout=120, + proxies=proxies, + verify=True, ) - content = json.loads(content) - if resp.status not in (200, 201, "200", "201"): - RH_Err.ctl(resp.status, msgx=content) + content = resp.json() + if resp.status_code not in (200, 201): + RH_Err.ctl(resp.status_code, msgx=content) for key, val in content.items(): confInfo[self.callerArgs.id][key] = val @@ -167,17 +171,12 @@ def getProxyInfo(self, splunkdMgmtUri, sessionKey, user, app): proxyInfoEndpoint=self.proxyInfoEndpoint, ) data = {"output_mode": "json", "--get-clear-credential--": "1"} - resp, cont = splunkd_request(url, sessionKey, data=data, retry=3) - if resp is None or resp.status != 200: - RH_Err.ctl( - 1104, - msgx="failed to load proxy info. {err}".format( - err=code_to_msg(resp, cont) if resp else cont - ), - ) + resp = splunkd_request(url, sessionKey, data=data, retry=3) + if resp is None or resp.status_code != 200: + RH_Err.ctl(1104, msgx=f"failed to load proxy info. 
{code_to_msg(resp)}") try: - proxy_info = json.loads(cont)["entry"][0]["content"] + proxy_info = resp.json()["entry"][0]["content"] except IndexError | KeyError: proxy_info = {} diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/schema.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/schema.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/teardown.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/teardown.py old mode 100755 new mode 100644 index 4c50522..2aa3690 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/teardown.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/teardown.py @@ -99,7 +99,7 @@ def make_uri(self, endpoint, entry=None): def clean(self, endpoint): url = self.make_uri(endpoint) + "?count=-1" - resp, cont = splunkd_request( + resp = splunkd_request( url, self.getSessionKey(), method="GET", @@ -109,10 +109,10 @@ def clean(self, endpoint): if resp is None: return {url: "Unknown reason"} - if resp.status != 200: - return {url: self.convertErrMsg(cont)} + if resp.status_code != 200: + return {url: self.convertErrMsg(resp.text)} - cont = json.loads(cont) + cont = resp.json() ents = [ent["name"] for ent in cont.get("entry", [])] errs = {} for ent in ents: @@ -120,7 +120,7 @@ def clean(self, endpoint): if not self.distinguish(endpoint, ent, **args): continue url_ent = self.make_uri(endpoint, entry=ent) - resp, cont = splunkd_request( + resp = splunkd_request( url_ent, self.getSessionKey(), method="DELETE", @@ -130,7 +130,7 @@ def clean(self, endpoint): if resp is None: errs[url_ent] = "Unknown reason" - if resp.status != 200: + if resp.status_code != 200: errs[url_ent] = self.convertErrMsg(cont) return errs diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/util.py old mode 100755 new mode 100644 index 8b11a82..23abd06 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/util.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/util.py @@ -15,17 +15,20 @@ # import os.path +from typing import Any, Dict, Optional + +import solnlib.utils as utils from .error import RestError try: from splunk import admin -except: +except Exception: print("Some functions will not be available outside of a splunk hosted process") try: from splunktalib.common import util -except: +except Exception: print('Python Lib for Splunk add-on "splunktalib" is required') raise BaseException() @@ -95,3 +98,44 @@ def remove_http_proxy_env_vars(): del os.environ[k] elif k.upper() in os.environ: del os.environ[k.upper()] + + +def get_proxy_uri(proxy: Dict[str, Any]) -> Optional[str]: + """ + :proxy: dict like, proxy information are in the following + format { + "proxy_url": zz, + "proxy_port": aa, + "proxy_username": bb, + "proxy_password": cc, + "proxy_type": http,sock4,sock5, + "proxy_rdns": 0 or 1, + } + :return: proxy uri or None + """ + uri = None + if proxy and proxy.get("proxy_url") and proxy.get("proxy_type"): + uri = proxy["proxy_url"] + # socks5 causes the DNS resolution to happen on the client + # socks5h causes the DNS 
resolution to happen on the proxy server + if proxy.get("proxy_type") == "socks5" and utils.is_true( + proxy.get("proxy_rdns") + ): + proxy["proxy_type"] = "socks5h" + # setting default value of proxy_type to "http" if + # its value is not from ["http", "socks4", "socks5"] + if proxy.get("proxy_type") not in ["http", "socks4", "socks5"]: + proxy["proxy_type"] = "http" + if proxy.get("proxy_port"): + uri = "{}:{}".format(uri, proxy.get("proxy_port")) + if proxy.get("proxy_username") and proxy.get("proxy_password"): + uri = "{}://{}:{}@{}/".format( + proxy["proxy_type"], + proxy["proxy_username"], + proxy["proxy_password"], + uri, + ) + else: + uri = "{}://{}".format(proxy["proxy_type"], uri) + + return uri diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/validator.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/rest_handler/validator.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/rest_helper.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/rest_helper.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/rest_migration.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/rest_migration.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/setup_util.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/setup_util.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/utility.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib/splunk_aoblib/utility.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/METADATA deleted file mode 100644 index fe10dfd..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/METADATA +++ /dev/null @@ -1,35 +0,0 @@ -Metadata-Version: 2.1 -Name: typing_extensions -Version: 4.0.1 -Summary: Backported and Experimental Type Hints for Python 3.6+ -Keywords: annotations,backport,checker,checking,function,hinting,hints,type,typechecking,typehinting,typehints,typing -Author-email: "Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Michael Lee" -Requires-Python: >=3.6 -Description-Content-Type: text/x-rst -Classifier: Development Status :: 3 - 
Alpha -Classifier: Environment :: Console -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: Python Software Foundation License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Topic :: Software Development -Project-URL: Home, https://github.com/python/typing/blob/master/typing_extensions/README.rst - -Typing Extensions -- Backported and Experimental Type Hints for Python - -The ``typing`` module was added to the standard library in Python 3.5, but -many new features have been added to the module since then. -This means users of older Python versions who are unable to upgrade will not be -able to take advantage of new types added to the ``typing`` module, such as -``typing.Protocol`` or ``typing.TypedDict``. - -The ``typing_extensions`` module contains backports of these changes. -Experimental types that may eventually be added to the ``typing`` -module are also included in ``typing_extensions``. - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/RECORD deleted file mode 100644 index eb7c95b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/RECORD +++ /dev/null @@ -1,6 +0,0 @@ -typing_extensions-4.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -typing_extensions-4.0.1.dist-info/LICENSE,sha256=_xfOlOECAk3raHc-scx0ynbaTmWPNzUx8Kwi1oprsa0,12755 -typing_extensions-4.0.1.dist-info/METADATA,sha256=iZ_5HONZZBXtF4kroz-IPZYIl9M8IE1B00R82dWcBqE,1736 -typing_extensions-4.0.1.dist-info/RECORD,, -typing_extensions-4.0.1.dist-info/WHEEL,sha256=LVOPL_YDMEiGvRLgDK1hLkfhFCnTcxcAYZJtpNFses0,81 -typing_extensions.py,sha256=1uqi_RSlI7gos4eJB_NEV3d5wQwzTUQHd3_jrkbTo8Q,87149 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunk_sdk-1.6.16.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/LICENSE similarity index 94% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/LICENSE index 583f9f6..1df6b3b 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.0.1.dist-info/LICENSE +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/LICENSE @@ -13,12 +13,11 @@ software. 
In May 2000, Guido and the Python core development team moved to BeOpen.com to form the BeOpen PythonLabs team. In October of the same -year, the PythonLabs team moved to Digital Creations (now Zope -Corporation, see http://www.zope.com). In 2001, the Python Software -Foundation (PSF, see http://www.python.org/psf/) was formed, a -non-profit organization created specifically to own Python-related -Intellectual Property. Zope Corporation is a sponsoring member of -the PSF. +year, the PythonLabs team moved to Digital Creations, which became +Zope Corporation. In 2001, the Python Software Foundation (PSF, see +https://www.python.org/psf/) was formed, a non-profit organization +created specifically to own Python-related Intellectual Property. +Zope Corporation was a sponsoring member of the PSF. All Python releases are Open Source (see http://www.opensource.org for the Open Source Definition). Historically, most, but not all, Python @@ -74,8 +73,9 @@ analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are -retained in Python alone or in any derivative version prepared by Licensee. +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make @@ -180,9 +180,9 @@ version prepared by Licensee. Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): "Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement. This Agreement together with -Python 1.6.1 may be located on the Internet using the following +Python 1.6.1 may be located on the internet using the following unique, persistent identifier (known as a handle): 1895.22/1013. This -Agreement may also be obtained from a proxy server on the Internet +Agreement may also be obtained from a proxy server on the internet using the following URL: http://hdl.handle.net/1895.22/1013". 3. 
In the event Licensee prepares a derivative work that is based on diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/METADATA new file mode 100644 index 0000000..f147b26 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/METADATA @@ -0,0 +1,190 @@ +Metadata-Version: 2.1 +Name: typing_extensions +Version: 4.5.0 +Summary: Backported and Experimental Type Hints for Python 3.7+ +Keywords: annotations,backport,checker,checking,function,hinting,hints,type,typechecking,typehinting,typehints,typing +Author-email: "Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Michael Lee" +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development +Project-URL: Bug Tracker, https://github.com/python/typing_extensions/issues +Project-URL: Changes, https://github.com/python/typing_extensions/blob/main/CHANGELOG.md +Project-URL: Documentation, https://typing.readthedocs.io/ +Project-URL: Home, https://github.com/python/typing_extensions +Project-URL: Q & A, https://github.com/python/typing/discussions +Project-URL: Repository, https://github.com/python/typing_extensions + +# Typing Extensions + +[![Chat at https://gitter.im/python/typing](https://badges.gitter.im/python/typing.svg)](https://gitter.im/python/typing) + +## Overview + +The `typing_extensions` module serves two related purposes: + +- Enable use of new type system features on older Python versions. For example, + `typing.TypeGuard` is new in Python 3.10, but `typing_extensions` allows + users on previous Python versions to use it too. +- Enable experimentation with new type system PEPs before they are accepted and + added to the `typing` module. + +New features may be added to `typing_extensions` as soon as they are specified +in a PEP that has been added to the [python/peps](https://github.com/python/peps) +repository. If the PEP is accepted, the feature will then be added to `typing` +for the next CPython release. No typing PEP has been rejected so far, so we +haven't yet figured out how to deal with that possibility. + +Starting with version 4.0.0, `typing_extensions` uses +[Semantic Versioning](https://semver.org/). The +major version is incremented for all backwards-incompatible changes. +Therefore, it's safe to depend +on `typing_extensions` like this: `typing_extensions >=x.y, <(x+1)`, +where `x.y` is the first version that includes all features you need. + +`typing_extensions` supports Python versions 3.7 and higher. In the future, +support for older Python versions will be dropped some time after that version +reaches end of life. 
+
+## Included items
+
+This module currently contains the following:
+
+- Experimental features
+
+  - `override` (see [PEP 698](https://peps.python.org/pep-0698/))
+  - The `default=` argument to `TypeVar`, `ParamSpec`, and `TypeVarTuple` (see [PEP 696](https://peps.python.org/pep-0696/))
+  - The `infer_variance=` argument to `TypeVar` (see [PEP 695](https://peps.python.org/pep-0695/))
+  - The `@deprecated` decorator (see [PEP 702](https://peps.python.org/pep-0702/))
+
+- In `typing` since Python 3.11
+
+  - `assert_never`
+  - `assert_type`
+  - `clear_overloads`
+  - `@dataclass_transform()` (see [PEP 681](https://peps.python.org/pep-0681/))
+  - `get_overloads`
+  - `LiteralString` (see [PEP 675](https://peps.python.org/pep-0675/))
+  - `Never`
+  - `NotRequired` (see [PEP 655](https://peps.python.org/pep-0655/))
+  - `reveal_type`
+  - `Required` (see [PEP 655](https://peps.python.org/pep-0655/))
+  - `Self` (see [PEP 673](https://peps.python.org/pep-0673/))
+  - `TypeVarTuple` (see [PEP 646](https://peps.python.org/pep-0646/); the `typing_extensions` version supports the `default=` argument from [PEP 696](https://peps.python.org/pep-0696/))
+  - `Unpack` (see [PEP 646](https://peps.python.org/pep-0646/))
+
+- In `typing` since Python 3.10
+
+  - `Concatenate` (see [PEP 612](https://peps.python.org/pep-0612/))
+  - `ParamSpec` (see [PEP 612](https://peps.python.org/pep-0612/); the `typing_extensions` version supports the `default=` argument from [PEP 696](https://peps.python.org/pep-0696/))
+  - `ParamSpecArgs` (see [PEP 612](https://peps.python.org/pep-0612/))
+  - `ParamSpecKwargs` (see [PEP 612](https://peps.python.org/pep-0612/))
+  - `TypeAlias` (see [PEP 613](https://peps.python.org/pep-0613/))
+  - `TypeGuard` (see [PEP 647](https://peps.python.org/pep-0647/))
+  - `is_typeddict`
+
+- In `typing` since Python 3.9
+
+  - `Annotated` (see [PEP 593](https://peps.python.org/pep-0593/))
+
+- In `typing` since Python 3.8
+
+  - `final` (see [PEP 591](https://peps.python.org/pep-0591/))
+  - `Final` (see [PEP 591](https://peps.python.org/pep-0591/))
+  - `Literal` (see [PEP 586](https://peps.python.org/pep-0586/))
+  - `Protocol` (see [PEP 544](https://peps.python.org/pep-0544/))
+  - `runtime_checkable` (see [PEP 544](https://peps.python.org/pep-0544/))
+  - `TypedDict` (see [PEP 589](https://peps.python.org/pep-0589/))
+  - `get_origin` (`typing_extensions` provides this function only in Python 3.7+)
+  - `get_args` (`typing_extensions` provides this function only in Python 3.7+)
+
+- In `typing` since Python 3.7
+
+  - `OrderedDict`
+
+- In `typing` since Python 3.5 or 3.6 (see [the typing documentation](https://docs.python.org/3.10/library/typing.html) for details)
+
+  - `AsyncContextManager`
+  - `AsyncGenerator`
+  - `AsyncIterable`
+  - `AsyncIterator`
+  - `Awaitable`
+  - `ChainMap`
+  - `ClassVar` (see [PEP 526](https://peps.python.org/pep-0526/))
+  - `ContextManager`
+  - `Coroutine`
+  - `Counter`
+  - `DefaultDict`
+  - `Deque`
+  - `NewType`
+  - `NoReturn`
+  - `overload`
+  - `Text`
+  - `Type`
+  - `TYPE_CHECKING`
+  - `get_type_hints`
+
+- The following have always been present in `typing`, but the `typing_extensions` versions provide
+  additional features:
+
+  - `Any` (supports inheritance since Python 3.11)
+  - `NamedTuple` (supports multiple inheritance with `Generic` since Python 3.11)
+  - `TypeVar` (see PEPs [695](https://peps.python.org/pep-0695/) and [696](https://peps.python.org/pep-0696/))
+
+# Other Notes and Limitations
+
+Certain objects were changed after they were added to `typing`, and
+`typing_extensions` provides a backport even on newer Python versions:
+
+- `TypedDict` does not store runtime information
+  about which (if any) keys are non-required in Python 3.8, and does not
+  honor the `total` keyword with old-style `TypedDict()` in Python
+  3.9.0 and 3.9.1. `TypedDict` also does not support multiple inheritance
+  with `typing.Generic` on Python <3.11.
+- `get_origin` and `get_args` lack support for `Annotated` in
+  Python 3.8 and lack support for `ParamSpecArgs` and `ParamSpecKwargs`
+  in 3.9.
+- `@final` was changed in Python 3.11 to set the `.__final__` attribute.
+- `@overload` was changed in Python 3.11 to make function overloads
+  introspectable at runtime. In order to access overloads with
+  `typing_extensions.get_overloads()`, you must use
+  `@typing_extensions.overload`.
+- `NamedTuple` was changed in Python 3.11 to allow for multiple inheritance
+  with `typing.Generic`.
+- Since Python 3.11, it has been possible to inherit from `Any` at
+  runtime. `typing_extensions.Any` also provides this capability.
+- `TypeVar` gains two additional parameters, `default=` and `infer_variance=`,
+  in the draft PEPs [695](https://peps.python.org/pep-0695/) and [696](https://peps.python.org/pep-0696/), which are being considered for inclusion
+  in Python 3.12.
+
+There are a few types whose interface was modified between different
+versions of typing. For example, `typing.Sequence` was modified to
+subclass `typing.Reversible` as of Python 3.5.3.
+
+These changes are _not_ backported to prevent subtle compatibility
+issues when mixing the differing implementations of modified classes.
+
+Certain types have incorrect runtime behavior due to limitations of older
+versions of the typing module:
+
+- `ParamSpec` and `Concatenate` will not work with `get_args` and
+  `get_origin`. Certain [PEP 612](https://peps.python.org/pep-0612/) special cases in user-defined
+  `Generic`s are also not available.
+
+These types are only guaranteed to work for static type checking.
+
+## Running tests
+
+To run tests, navigate into the appropriate source directory and run
+`test_typing_extensions.py`.
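The METADATA above lists the backports that the bundled `typing_extensions` 4.5.0 provides. A minimal sketch of how vendored add-on code can use two of them, assuming `typing_extensions` is importable from `aob_py3/` as in this add-on; the `Movie` and `Greeter` names are illustrative only, not taken from this patch:

```python
# Minimal sketch (not part of this patch): exercising two of the
# typing_extensions 4.5.0 backports described in the METADATA above.
from typing_extensions import NotRequired, TypedDict, override


class Movie(TypedDict):
    title: str
    year: NotRequired[int]  # PEP 655: this key may be omitted


class Greeter:
    def greet(self) -> str:
        return "hello"


class LoudGreeter(Greeter):
    @override  # PEP 698: type checkers flag this if the base method is renamed
    def greet(self) -> str:
        return "HELLO"


movie: Movie = {"title": "Blade Runner"}  # valid even without "year"
print(LoudGreeter().greet(), movie["title"])
```

Importing these names from `typing_extensions` rather than `typing` keeps such code working on the older interpreters listed in the classifiers above; on sufficiently new interpreters they behave like their stdlib counterparts.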
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/RECORD
new file mode 100644
index 0000000..dd4898f
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/RECORD
@@ -0,0 +1,6 @@
+typing_extensions-4.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+typing_extensions-4.5.0.dist-info/LICENSE,sha256=x6-2XnVXB7n7kEhziaF20-09ADHVExr95FwjcV_16JE,12787
+typing_extensions-4.5.0.dist-info/METADATA,sha256=ykVaNaZyyp7LL1wHjigAwLETiPfZAjr-mjymkqShbPQ,8529
+typing_extensions-4.5.0.dist-info/RECORD,,
+typing_extensions-4.5.0.dist-info/WHEEL,sha256=rSgq_JpHF9fHR1lx53qwg_1-2LypZE_qmcuXbVUq948,81
+typing_extensions.py,sha256=efQTxJ-f4bVI3kR6ps_R9RZAU-78EChvgDSqk7_60es,84065
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/WHEEL
similarity index 71%
rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/WHEEL
rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/WHEEL
index 3764652..db4a255 100644
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/WHEEL
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions-4.5.0.dist-info/WHEEL
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry 1.0.7
+Generator: flit 3.8.0
 Root-Is-Purelib: true
 Tag: py3-none-any
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions.py
old mode 100755
new mode 100644
index 9f1c7aa..6ae0c34
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/typing_extensions.py
@@ -1,52 +1,30 @@
 import abc
 import collections
 import collections.abc
+import functools
+import inspect
 import operator
 import sys
+import types as _types
 import typing
+import warnings
-# After PEP 560, internal typing API was substantially reworked.
-# This is especially important for Protocol class which uses internal APIs
-# quite extensively.
-PEP_560 = sys.version_info[:3] >= (3, 7, 0)
-if PEP_560:
-    GenericMeta = type
-else:
-    # 3.6
-    from typing import GenericMeta, _type_vars  # noqa
-
-# The two functions below are copies of typing internal helpers.
-# They are needed by _ProtocolMeta
-
-
-def _no_slots_copy(dct):
-    dict_copy = dict(dct)
-    if '__slots__' in dict_copy:
-        for slot in dict_copy['__slots__']:
-            dict_copy.pop(slot, None)
-    return dict_copy
-
-
-def _check_generic(cls, parameters):
-    if not cls.__parameters__:
-        raise TypeError(f"{cls} is not a generic class")
-    alen = len(parameters)
-    elen = len(cls.__parameters__)
-    if alen != elen:
-        raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
-                        f" actual {alen}, expected {elen}")
-
-
-# Please keep __all__ alphabetized within each category.
 __all__ = [
     # Super-special typing primitives.
+ 'Any', 'ClassVar', 'Concatenate', 'Final', + 'LiteralString', 'ParamSpec', + 'ParamSpecArgs', + 'ParamSpecKwargs', 'Self', 'Type', + 'TypeVar', + 'TypeVarTuple', + 'Unpack', # ABCs (from collections.abc). 'Awaitable', @@ -62,6 +40,7 @@ def _check_generic(cls, parameters): 'Counter', 'Deque', 'DefaultDict', + 'NamedTuple', 'OrderedDict', 'TypedDict', @@ -70,49 +49,102 @@ def _check_generic(cls, parameters): # One-off things. 'Annotated', + 'assert_never', + 'assert_type', + 'clear_overloads', + 'dataclass_transform', + 'deprecated', + 'get_overloads', 'final', + 'get_args', + 'get_origin', + 'get_type_hints', 'IntVar', + 'is_typeddict', 'Literal', 'NewType', 'overload', + 'override', 'Protocol', + 'reveal_type', 'runtime', 'runtime_checkable', 'Text', 'TypeAlias', 'TypeGuard', 'TYPE_CHECKING', + 'Never', + 'NoReturn', + 'Required', + 'NotRequired', ] -if PEP_560: - __all__.extend(["get_args", "get_origin", "get_type_hints"]) +# for backward compatibility +PEP_560 = True +GenericMeta = type -# 3.6.2+ -if hasattr(typing, 'NoReturn'): - NoReturn = typing.NoReturn -# 3.6.0-3.6.1 -else: - class _NoReturn(typing._FinalTypingBase, _root=True): - """Special type indicating functions that never return. - Example:: +# The functions below are modified copies of typing internal helpers. +# They are needed by _ProtocolMeta and they provide support for PEP 646. - from typing import NoReturn +_marker = object() - def stop() -> NoReturn: - raise Exception('no way') - This type is invalid in other positions, e.g., ``List[NoReturn]`` - will fail in static type checkers. - """ - __slots__ = () +def _check_generic(cls, parameters, elen=_marker): + """Check correct count for parameters of a generic cls (internal helper). + This gives a nice error message in case of count mismatch. + """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + if elen is _marker: + if not hasattr(cls, "__parameters__") or not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + elen = len(cls.__parameters__) + alen = len(parameters) + if alen != elen: + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) + if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): + return + raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" + f" actual {alen}, expected {elen}") - def __instancecheck__(self, obj): - raise TypeError("NoReturn cannot be used with isinstance().") - def __subclasscheck__(self, cls): - raise TypeError("NoReturn cannot be used with issubclass().") +if sys.version_info >= (3, 10): + def _should_collect_from_parameters(t): + return isinstance( + t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) + ) +elif sys.version_info >= (3, 9): + def _should_collect_from_parameters(t): + return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) +else: + def _should_collect_from_parameters(t): + return isinstance(t, typing._GenericAlias) and not t._special + - NoReturn = _NoReturn(_root=True) +def _collect_type_vars(types, typevar_types=None): + """Collect all type variable contained in types in order of + first appearance (lexicographic order). 
For example:: + + _collect_type_vars((T, List[S, T])) == (T, S) + """ + if typevar_types is None: + typevar_types = typing.TypeVar + tvars = [] + for t in types: + if ( + isinstance(t, typevar_types) and + t not in tvars and + not _is_unpack(t) + ): + tvars.append(t) + if _should_collect_from_parameters(t): + tvars.extend([t for t in t.__parameters__ if t not in tvars]) + return tuple(tvars) + + +NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. # (These are not for export.) @@ -122,6 +154,37 @@ def __subclasscheck__(self, cls): T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. + +if sys.version_info >= (3, 11): + from typing import Any +else: + + class _AnyMeta(type): + def __instancecheck__(self, obj): + if self is Any: + raise TypeError("typing_extensions.Any cannot be used with isinstance()") + return super().__instancecheck__(obj) + + def __repr__(self): + if self is Any: + return "typing_extensions.Any" + return super().__repr__() + + class Any(metaclass=_AnyMeta): + """Special type indicating an unconstrained type. + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. + Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + checks. + """ + def __new__(cls, *args, **kwargs): + if cls is Any: + raise TypeError("Any cannot be instantiated") + return super().__new__(cls, *args, **kwargs) + + ClassVar = typing.ClassVar # On older versions of typing there is an internal class named "Final". @@ -129,7 +192,7 @@ def __subclasscheck__(self, cls): if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 -elif sys.version_info[:2] >= (3, 7): +else: class _FinalForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -137,7 +200,7 @@ def __repr__(self): def __getitem__(self, parameters): item = typing._type_check(parameters, - f'{self._name} accepts only single type') + f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) Final = _FinalForm('Final', @@ -154,67 +217,13 @@ class FastConnector(Connection): TIMEOUT = 1 # Error reported by type checker There is no runtime checking of these properties.""") -# 3.6 -else: - class _Final(typing._FinalTypingBase, _root=True): - """A special typing construct to indicate that a name - cannot be re-assigned or overridden in a subclass. - For example: - - MAX_SIZE: Final = 9000 - MAX_SIZE += 1 # Error reported by type checker - - class Connection: - TIMEOUT: Final[int] = 10 - class FastConnector(Connection): - TIMEOUT = 1 # Error reported by type checker - - There is no runtime checking of these properties. 
- """ - - __slots__ = ('__type__',) - - def __init__(self, tp=None, **kwds): - self.__type__ = tp - - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - f'{cls.__name__[1:]} accepts only single type.'), - _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) - - def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += f'[{typing._type_repr(self.__type__)}]' - return r - - def __hash__(self): - return hash((type(self).__name__, self.__type__)) - - def __eq__(self, other): - if not isinstance(other, _Final): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ - return self is other - - Final = _Final(_root=True) - -# 3.8+ -if hasattr(typing, 'final'): +if sys.version_info >= (3, 11): final = typing.final -# 3.6-3.7 else: + # @final exists in 3.8+, but we backport it for all versions + # before 3.11 to keep support for the __final__ attribute. + # See https://bugs.python.org/issue46342 def final(f): """This decorator can be used to indicate to type checkers that the decorated method cannot be overridden, and decorated class @@ -233,8 +242,17 @@ class Leaf: class Other(Leaf): # Error reported by type checker ... - There is no runtime checking of these properties. + There is no runtime checking of these properties. The decorator + sets the ``__final__`` attribute to ``True`` on the decorated object + to allow runtime introspection. """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass return f @@ -246,7 +264,7 @@ def IntVar(name): if hasattr(typing, 'Literal'): Literal = typing.Literal # 3.7: -elif sys.version_info[:2] >= (3, 7): +else: class _LiteralForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -268,59 +286,75 @@ def __getitem__(self, parameters): Literal[...] cannot be subclassed. There is no runtime checking verifying that the parameter is actually a value instead of a type.""") -# 3.6: -else: - class _Literal(typing._FinalTypingBase, _root=True): - """A type that can be used to indicate to type checkers that the - corresponding value has a value literally equivalent to the - provided parameter. For example: - - var: Literal[4] = 4 - - The type checker understands that 'var' is literally equal to the - value 4 and no other value. - Literal[...] cannot be subclassed. There is no runtime checking - verifying that the parameter is actually a value instead of a type. 
- """ - - __slots__ = ('__values__',) - - def __init__(self, values=None, **kwds): - self.__values__ = values - - def __getitem__(self, values): - cls = type(self) - if self.__values__ is None: - if not isinstance(values, tuple): - values = (values,) - return cls(values, _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - return self - - def __repr__(self): - r = super().__repr__() - if self.__values__ is not None: - r += f'[{", ".join(map(typing._type_repr, self.__values__))}]' - return r - def __hash__(self): - return hash((type(self).__name__, self.__values__)) +_overload_dummy = typing._overload_dummy # noqa - def __eq__(self, other): - if not isinstance(other, _Literal): - return NotImplemented - if self.__values__ is not None: - return self.__values__ == other.__values__ - return self is other - Literal = _Literal(_root=True) +if hasattr(typing, "get_overloads"): # 3.11+ + overload = typing.overload + get_overloads = typing.get_overloads + clear_overloads = typing.clear_overloads +else: + # {module: {qualname: {firstlineno: func}}} + _overload_registry = collections.defaultdict( + functools.partial(collections.defaultdict, dict) + ) + + def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + + The overloads for a function can be retrieved at runtime using the + get_overloads() function. + """ + # classmethod and staticmethod + f = getattr(func, "__func__", func) + try: + _overload_registry[f.__module__][f.__qualname__][ + f.__code__.co_firstlineno + ] = func + except AttributeError: + # Not a normal function; ignore. + pass + return _overload_dummy + def get_overloads(func): + """Return all defined overloads for *func* as a sequence.""" + # classmethod and staticmethod + f = getattr(func, "__func__", func) + if f.__module__ not in _overload_registry: + return [] + mod_dict = _overload_registry[f.__module__] + if f.__qualname__ not in mod_dict: + return [] + return list(mod_dict[f.__qualname__].values()) -_overload_dummy = typing._overload_dummy # noqa -overload = typing.overload + def clear_overloads(): + """Clear all overloads in the registry.""" + _overload_registry.clear() # This is not a real generic class. Don't use outside annotations. @@ -330,154 +364,30 @@ def __eq__(self, other): # A few are simply re-exported for completeness. -class _ExtensionsGenericMeta(GenericMeta): - def __subclasscheck__(self, subclass): - """This mimics a more modern GenericMeta.__subclasscheck__() logic - (that does not have problems with recursion) to work around interactions - between collections, typing, and typing_extensions on older - versions of Python, see https://github.com/python/typing/issues/501. 
- """ - if self.__origin__ is not None: - if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: - raise TypeError("Parameterized generics cannot be used with class " - "or instance checks") - return False - if not self.__extra__: - return super().__subclasscheck__(subclass) - res = self.__extra__.__subclasshook__(subclass) - if res is not NotImplemented: - return res - if self.__extra__ in subclass.__mro__: - return True - for scls in self.__extra__.__subclasses__(): - if isinstance(scls, GenericMeta): - continue - if issubclass(subclass, scls): - return True - return False - - Awaitable = typing.Awaitable Coroutine = typing.Coroutine AsyncIterable = typing.AsyncIterable AsyncIterator = typing.AsyncIterator - -# 3.6.1+ -if hasattr(typing, 'Deque'): - Deque = typing.Deque -# 3.6.0 -else: - class Deque(collections.deque, typing.MutableSequence[T], - metaclass=_ExtensionsGenericMeta, - extra=collections.deque): - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is Deque: - return collections.deque(*args, **kwds) - return typing._generic_new(collections.deque, cls, *args, **kwds) - +Deque = typing.Deque ContextManager = typing.ContextManager -# 3.6.2+ -if hasattr(typing, 'AsyncContextManager'): - AsyncContextManager = typing.AsyncContextManager -# 3.6.0-3.6.1 -else: - from _collections_abc import _check_methods as _check_methods_in_mro # noqa - - class AsyncContextManager(typing.Generic[T_co]): - __slots__ = () - - async def __aenter__(self): - return self - - @abc.abstractmethod - async def __aexit__(self, exc_type, exc_value, traceback): - return None - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncContextManager: - return _check_methods_in_mro(C, "__aenter__", "__aexit__") - return NotImplemented - +AsyncContextManager = typing.AsyncContextManager DefaultDict = typing.DefaultDict # 3.7.2+ if hasattr(typing, 'OrderedDict'): OrderedDict = typing.OrderedDict # 3.7.0-3.7.2 -elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2): - OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) -# 3.6 -else: - class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT], - metaclass=_ExtensionsGenericMeta, - extra=collections.OrderedDict): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is OrderedDict: - return collections.OrderedDict(*args, **kwds) - return typing._generic_new(collections.OrderedDict, cls, *args, **kwds) - -# 3.6.2+ -if hasattr(typing, 'Counter'): - Counter = typing.Counter -# 3.6.0-3.6.1 -else: - class Counter(collections.Counter, - typing.Dict[T, int], - metaclass=_ExtensionsGenericMeta, extra=collections.Counter): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is Counter: - return collections.Counter(*args, **kwds) - return typing._generic_new(collections.Counter, cls, *args, **kwds) - -# 3.6.1+ -if hasattr(typing, 'ChainMap'): - ChainMap = typing.ChainMap -elif hasattr(collections, 'ChainMap'): - class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT], - metaclass=_ExtensionsGenericMeta, - extra=collections.ChainMap): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is ChainMap: - return collections.ChainMap(*args, **kwds) - return typing._generic_new(collections.ChainMap, cls, *args, **kwds) - -# 3.6.1+ -if hasattr(typing, 'AsyncGenerator'): - AsyncGenerator = typing.AsyncGenerator -# 3.6.0 else: - class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra], - metaclass=_ExtensionsGenericMeta, - 
extra=collections.abc.AsyncGenerator): - __slots__ = () + OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) +Counter = typing.Counter +ChainMap = typing.ChainMap +AsyncGenerator = typing.AsyncGenerator NewType = typing.NewType Text = typing.Text TYPE_CHECKING = typing.TYPE_CHECKING -def _gorg(cls): - """This function exists for compatibility with old typing versions.""" - assert isinstance(cls, GenericMeta) - if hasattr(cls, '_gorg'): - return cls._gorg - while cls.__origin__ is not None: - cls = cls.__origin__ - return cls - - _PROTO_WHITELIST = ['Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', @@ -507,18 +417,57 @@ def _is_callable_members_only(cls): return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) +def _maybe_adjust_parameters(cls): + """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. + + The contents of this function are very similar + to logic found in typing.Generic.__init_subclass__ + on the CPython main branch. + """ + tvars = [] + if '__orig_bases__' in cls.__dict__: + tvars = typing._collect_type_vars(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...] and/or Protocol[...]. + gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, typing._GenericAlias) and + base.__origin__ in (typing.Generic, Protocol)): + # for error messages + the_base = base.__origin__.__name__ + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...]" + " and/or Protocol[...] multiple types.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {the_base}[{s_args}]") + tvars = gvars + cls.__parameters__ = tuple(tvars) + + # 3.8+ if hasattr(typing, 'Protocol'): Protocol = typing.Protocol # 3.7 -elif PEP_560: - from typing import _collect_type_vars # noqa +else: def _no_init(self, *args, **kwargs): if type(self)._is_protocol: raise TypeError('Protocols cannot be instantiated') - class _ProtocolMeta(abc.ABCMeta): + class _ProtocolMeta(abc.ABCMeta): # noqa: B024 # This metaclass is a bit unfortunate and exists only because of the lack # of __instancehook__. def __instancecheck__(cls, instance): @@ -600,47 +549,17 @@ def __class_getitem__(cls, params): "Parameters to Protocol[...] must all be unique") else: # Subscripting a regular Generic subclass. - _check_generic(cls, params) + _check_generic(cls, params, len(cls.__parameters__)) return typing._GenericAlias(cls, params) def __init_subclass__(cls, *args, **kwargs): - tvars = [] if '__orig_bases__' in cls.__dict__: error = typing.Generic in cls.__orig_bases__ else: error = typing.Generic in cls.__bases__ if error: raise TypeError("Cannot inherit from plain Generic") - if '__orig_bases__' in cls.__dict__: - tvars = _collect_type_vars(cls.__orig_bases__) - # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. - # If found, tvars must be a subset of it. - # If not found, tvars is it. - # Also check for and reject plain Generic, - # and reject multiple Generic[...] and/or Protocol[...]. 
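
The tvars/gvars bookkeeping being deleted here is what the new _maybe_adjust_parameters() helper centralizes: it fills in __parameters__ for generic Protocols (and, further down, generic TypedDicts). An illustrative sketch of the behavior it preserves; SupportsRead and FileLike are hypothetical names, not from this patch:

from typing import TypeVar
from typing_extensions import Protocol, runtime_checkable

T = TypeVar("T")

@runtime_checkable
class SupportsRead(Protocol[T]):
    def read(self) -> T: ...

# __parameters__ is populated from the Protocol[T] base.
assert SupportsRead.__parameters__ == (T,)

class FileLike:
    def read(self) -> str:
        return "data"

# Runtime isinstance() checks only verify that the method exists.
assert isinstance(FileLike(), SupportsRead)
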
- gvars = None - for base in cls.__orig_bases__: - if (isinstance(base, typing._GenericAlias) and - base.__origin__ in (typing.Generic, Protocol)): - # for error messages - the_base = base.__origin__.__name__ - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...]" - " and/or Protocol[...] multiple types.") - gvars = base.__parameters__ - if gvars is None: - gvars = tvars - else: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) - s_args = ', '.join(str(g) for g in gvars) - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in {the_base}[{s_args}]") - tvars = gvars - cls.__parameters__ = tuple(tvars) + _maybe_adjust_parameters(cls) # Determine if this is a protocol or a concrete subclass. if not cls.__dict__.get('_is_protocol', None): @@ -694,250 +613,12 @@ def _proto_hook(other): raise TypeError('Protocols can only inherit from other' f' protocols, got {repr(base)}') cls.__init__ = _no_init -# 3.6 -else: - from typing import _next_in_mro, _type_check # noqa - - def _no_init(self, *args, **kwargs): - if type(self)._is_protocol: - raise TypeError('Protocols cannot be instantiated') - - class _ProtocolMeta(GenericMeta): - """Internal metaclass for Protocol. - - This exists so Protocol classes can be generic without deriving - from Generic. - """ - def __new__(cls, name, bases, namespace, - tvars=None, args=None, origin=None, extra=None, orig_bases=None): - # This is just a version copied from GenericMeta.__new__ that - # includes "Protocol" special treatment. (Comments removed for brevity.) - assert extra is None # Protocols should not have extra - if tvars is not None: - assert origin is not None - assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars - else: - tvars = _type_vars(bases) - gvars = None - for base in bases: - if base is typing.Generic: - raise TypeError("Cannot inherit from plain Generic") - if (isinstance(base, GenericMeta) and - base.__origin__ in (typing.Generic, Protocol)): - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...] or" - " Protocol[...] multiple times.") - gvars = base.__parameters__ - if gvars is None: - gvars = tvars - else: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ", ".join(str(t) for t in tvars if t not in gvarset) - s_args = ", ".join(str(g) for g in gvars) - cls_name = "Generic" if any(b.__origin__ is typing.Generic - for b in bases) else "Protocol" - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in {cls_name}[{s_args}]") - tvars = gvars - - initial_bases = bases - if (extra is not None and type(extra) is abc.ABCMeta and - extra not in bases): - bases = (extra,) + bases - bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b - for b in bases) - if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases): - bases = tuple(b for b in bases if b is not typing.Generic) - namespace.update({'__origin__': origin, '__extra__': extra}) - self = super(GenericMeta, cls).__new__(cls, name, bases, namespace, - _root=True) - super(GenericMeta, self).__setattr__('_gorg', - self if not origin else - _gorg(origin)) - self.__parameters__ = tvars - self.__args__ = tuple(... 
if a is typing._TypingEllipsis else - () if a is typing._TypingEmpty else - a for a in args) if args else None - self.__next_in_mro__ = _next_in_mro(self) - if orig_bases is None: - self.__orig_bases__ = initial_bases - elif origin is not None: - self._abc_registry = origin._abc_registry - self._abc_cache = origin._abc_cache - if hasattr(self, '_subs_tree'): - self.__tree_hash__ = (hash(self._subs_tree()) if origin else - super(GenericMeta, self).__hash__()) - return self - - def __init__(cls, *args, **kwargs): - super().__init__(*args, **kwargs) - if not cls.__dict__.get('_is_protocol', None): - cls._is_protocol = any(b is Protocol or - isinstance(b, _ProtocolMeta) and - b.__origin__ is Protocol - for b in cls.__bases__) - if cls._is_protocol: - for base in cls.__mro__[1:]: - if not (base in (object, typing.Generic) or - base.__module__ == 'collections.abc' and - base.__name__ in _PROTO_WHITELIST or - isinstance(base, typing.TypingMeta) and base._is_protocol or - isinstance(base, GenericMeta) and - base.__origin__ is typing.Generic): - raise TypeError(f'Protocols can only inherit from other' - f' protocols, got {repr(base)}') - - cls.__init__ = _no_init - - def _proto_hook(other): - if not cls.__dict__.get('_is_protocol', None): - return NotImplemented - if not isinstance(other, type): - # Same error as for issubclass(1, int) - raise TypeError('issubclass() arg 1 must be a class') - for attr in _get_protocol_attrs(cls): - for base in other.__mro__: - if attr in base.__dict__: - if base.__dict__[attr] is None: - return NotImplemented - break - annotations = getattr(base, '__annotations__', {}) - if (isinstance(annotations, typing.Mapping) and - attr in annotations and - isinstance(other, _ProtocolMeta) and - other._is_protocol): - break - else: - return NotImplemented - return True - if '__subclasshook__' not in cls.__dict__: - cls.__subclasshook__ = _proto_hook - - def __instancecheck__(self, instance): - # We need this method for situations where attributes are - # assigned in __init__. - if ((not getattr(self, '_is_protocol', False) or - _is_callable_members_only(self)) and - issubclass(instance.__class__, self)): - return True - if self._is_protocol: - if all(hasattr(instance, attr) and - (not callable(getattr(self, attr, None)) or - getattr(instance, attr) is not None) - for attr in _get_protocol_attrs(self)): - return True - return super(GenericMeta, self).__instancecheck__(instance) - - def __subclasscheck__(self, cls): - if self.__origin__ is not None: - if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: - raise TypeError("Parameterized generics cannot be used with class " - "or instance checks") - return False - if (self.__dict__.get('_is_protocol', None) and - not self.__dict__.get('_is_runtime_protocol', None)): - if sys._getframe(1).f_globals['__name__'] in ['abc', - 'functools', - 'typing']: - return False - raise TypeError("Instance and class checks can only be used with" - " @runtime protocols") - if (self.__dict__.get('_is_runtime_protocol', None) and - not _is_callable_members_only(self)): - if sys._getframe(1).f_globals['__name__'] in ['abc', - 'functools', - 'typing']: - return super(GenericMeta, self).__subclasscheck__(cls) - raise TypeError("Protocols with non-method members" - " don't support issubclass()") - return super(GenericMeta, self).__subclasscheck__(cls) - - @typing._tp_cache - def __getitem__(self, params): - # We also need to copy this from GenericMeta.__getitem__ to get - # special treatment of "Protocol". 
(Comments removed for brevity.) - if not isinstance(params, tuple): - params = (params,) - if not params and _gorg(self) is not typing.Tuple: - raise TypeError( - f"Parameter list to {self.__qualname__}[...] cannot be empty") - msg = "Parameters to generic types must be types." - params = tuple(_type_check(p, msg) for p in params) - if self in (typing.Generic, Protocol): - if not all(isinstance(p, typing.TypeVar) for p in params): - raise TypeError( - f"Parameters to {repr(self)}[...] must all be type variables") - if len(set(params)) != len(params): - raise TypeError( - f"Parameters to {repr(self)}[...] must all be unique") - tvars = params - args = params - elif self in (typing.Tuple, typing.Callable): - tvars = _type_vars(params) - args = params - elif self.__origin__ in (typing.Generic, Protocol): - raise TypeError(f"Cannot subscript already-subscripted {repr(self)}") - else: - _check_generic(self, params) - tvars = _type_vars(params) - args = params - - prepend = (self,) if self.__origin__ is None else () - return self.__class__(self.__name__, - prepend + self.__bases__, - _no_slots_copy(self.__dict__), - tvars=tvars, - args=args, - origin=self, - extra=self.__extra__, - orig_bases=self.__orig_bases__) - - class Protocol(metaclass=_ProtocolMeta): - """Base class for protocol classes. Protocol classes are defined as:: - - class Proto(Protocol): - def meth(self) -> int: - ... - - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing), for example:: - - class C: - def meth(self) -> int: - return 0 - - def func(x: Proto) -> int: - return x.meth() - - func(C()) # Passes static type check - - See PEP 544 for details. Protocol classes decorated with - @typing_extensions.runtime act as simple-minded runtime protocol that checks - only the presence of given attributes, ignoring their type signatures. - - Protocol classes can be generic, they are defined as:: - - class GenProto(Protocol[T]): - def meth(self) -> T: - ... - """ - __slots__ = () - _is_protocol = True - - def __new__(cls, *args, **kwds): - if _gorg(cls) is Protocol: - raise TypeError("Type Protocol cannot be instantiated; " - "it can be used only as a base class") - return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds) # 3.8+ if hasattr(typing, 'runtime_checkable'): runtime_checkable = typing.runtime_checkable -# 3.6-3.7 +# 3.7 else: def runtime_checkable(cls): """Mark a protocol class as a runtime protocol, so that it @@ -961,7 +642,7 @@ def runtime_checkable(cls): # 3.8+ if hasattr(typing, 'SupportsIndex'): SupportsIndex = typing.SupportsIndex -# 3.6-3.7 +# 3.7 else: @runtime_checkable class SupportsIndex(Protocol): @@ -972,12 +653,17 @@ def __index__(self) -> int: pass -if sys.version_info >= (3, 9, 2): +if hasattr(typing, "Required"): # The standard library TypedDict in Python 3.8 does not store runtime information # about which (if any) keys are optional. See https://bugs.python.org/issue38834 # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 + # The standard library TypedDict below Python 3.11 does not store runtime + # information about optional and required keys when using Required or NotRequired. + # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. 
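
The comment block above is the crux of this rewrite: on interpreters where typing.Required exists, the standard-library TypedDict already tracks required and optional keys, so it is re-exported below; older interpreters get the reimplementation that follows. Either way the split is observable at runtime. A small sketch, adapting the Movie example used in the docstrings of this module:

from typing_extensions import TypedDict, Required, NotRequired

class Movie(TypedDict, total=False):
    title: Required[str]    # required despite total=False
    year: NotRequired[int]  # redundant under total=False, but explicit

assert Movie.__required_keys__ == frozenset({"title"})
assert Movie.__optional_keys__ == frozenset({"year"})
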
TypedDict = typing.TypedDict + _TypedDictMeta = typing._TypedDictMeta + is_typeddict = typing.is_typeddict else: def _check_fails(cls, other): try: @@ -1045,6 +731,8 @@ def _typeddict_new(*args, total=True, **kwargs): _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,' ' /, *, total=True, **kwargs)') + _TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters + class _TypedDictMeta(type): def __init__(cls, name, bases, ns, total=True): super().__init__(name, bases, ns) @@ -1057,14 +745,23 @@ def __new__(cls, name, bases, ns, total=True): # Subclasses and instances of TypedDict return actual dictionaries # via _dict_new. ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new + # Don't insert typing.Generic into __bases__ here, + # or Generic.__init_subclass__ will raise TypeError + # in the super().__new__() call. + # Instead, monkey-patch __bases__ onto the class after it's been created. tp_dict = super().__new__(cls, name, (dict,), ns) + if any(issubclass(base, typing.Generic) for base in bases): + tp_dict.__bases__ = (typing.Generic, dict) + _maybe_adjust_parameters(tp_dict) + annotations = {} own_annotations = ns.get('__annotations__', {}) - own_annotation_keys = set(own_annotations.keys()) msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + kwds = {"module": tp_dict.__module__} if _TAKES_MODULE else {} own_annotations = { - n: typing._type_check(tp, msg) for n, tp in own_annotations.items() + n: typing._type_check(tp, msg, **kwds) + for n, tp in own_annotations.items() } required_keys = set() optional_keys = set() @@ -1075,10 +772,22 @@ def __new__(cls, name, bases, ns, total=True): optional_keys.update(base.__dict__.get('__optional_keys__', ())) annotations.update(own_annotations) - if total: - required_keys.update(own_annotation_keys) - else: - optional_keys.update(own_annotation_keys) + for annotation_key, annotation_type in own_annotations.items(): + annotation_origin = get_origin(annotation_type) + if annotation_origin is Annotated: + annotation_args = get_args(annotation_type) + if annotation_args: + annotation_type = annotation_args[0] + annotation_origin = get_origin(annotation_type) + + if annotation_origin is Required: + required_keys.add(annotation_key) + elif annotation_origin is NotRequired: + optional_keys.add(annotation_key) + elif total: + required_keys.add(annotation_key) + else: + optional_keys.add(annotation_key) tp_dict.__annotations__ = annotations tp_dict.__required_keys__ = frozenset(required_keys) @@ -1121,121 +830,74 @@ class Point2D(TypedDict): syntax forms work for Python 2.7 and 3.2+ """ + if hasattr(typing, "_TypedDictMeta"): + _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) + else: + _TYPEDDICT_TYPES = (_TypedDictMeta,) -# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints) -if hasattr(typing, 'Annotated'): - Annotated = typing.Annotated - get_type_hints = typing.get_type_hints - # Not exported and not a public API, but needed for get_origin() and get_args() - # to work. - _AnnotatedAlias = typing._AnnotatedAlias -# 3.7-3.8 -elif PEP_560: - class _AnnotatedAlias(typing._GenericAlias, _root=True): - """Runtime representation of an annotated type. + def is_typeddict(tp): + """Check if an annotation is a TypedDict class - At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' - with extra annotations. 
The alias behaves like a normal typing alias, - instantiating is the same as instantiating the underlying type, binding - it to types is also the same. + For example:: + class Film(TypedDict): + title: str + year: int + + is_typeddict(Film) # => True + is_typeddict(Union[list, str]) # => False """ - def __init__(self, origin, metadata): - if isinstance(origin, _AnnotatedAlias): - metadata = origin.__metadata__ + metadata - origin = origin.__origin__ - super().__init__(origin, origin) - self.__metadata__ = metadata + return isinstance(tp, tuple(_TYPEDDICT_TYPES)) - def copy_with(self, params): - assert len(params) == 1 - new_type = params[0] - return _AnnotatedAlias(new_type, self.__metadata__) - def __repr__(self): - return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " - f"{', '.join(repr(a) for a in self.__metadata__)}]") +if hasattr(typing, "assert_type"): + assert_type = typing.assert_type - def __reduce__(self): - return operator.getitem, ( - Annotated, (self.__origin__,) + self.__metadata__ - ) - - def __eq__(self, other): - if not isinstance(other, _AnnotatedAlias): - return NotImplemented - if self.__origin__ != other.__origin__: - return False - return self.__metadata__ == other.__metadata__ - - def __hash__(self): - return hash((self.__origin__, self.__metadata__)) - - class Annotated: - """Add context specific metadata to a type. - - Example: Annotated[int, runtime_check.Unsigned] indicates to the - hypothetical runtime_check module that this type is an unsigned int. - Every other consumer of this type can ignore this metadata and treat - this type as int. - - The first argument to Annotated must be a valid type (and will be in - the __origin__ field), the remaining arguments are kept as a tuple in - the __extra__ field. - - Details: - - - It's an error to call `Annotated` with less than two arguments. - - Nested Annotated are flattened:: - - Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - - - Instantiating an annotated type is equivalent to instantiating the - underlying type:: - - Annotated[C, Ann1](5) == C(5) +else: + def assert_type(__val, __typ): + """Assert (to the type checker) that the value is of the given type. - - Annotated can be used as a generic type alias:: + When the type checker encounters a call to assert_type(), it + emits an error if the value is not of the specified type:: - Optimized = Annotated[T, runtime.Optimize()] - Optimized[int] == Annotated[int, runtime.Optimize()] + def greet(name: str) -> None: + assert_type(name, str) # ok + assert_type(name, int) # type checker error - OptimizedList = Annotated[List[T], runtime.Optimize()] - OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + At runtime this returns the first argument unchanged and otherwise + does nothing. """ + return __val - __slots__ = () - def __new__(cls, *args, **kwargs): - raise TypeError("Type Annotated cannot be instantiated.") - - @typing._tp_cache - def __class_getitem__(cls, params): - if not isinstance(params, tuple) or len(params) < 2: - raise TypeError("Annotated[...] should be used " - "with at least two arguments (a type and an " - "annotation).") - msg = "Annotated[t, ...]: t must be a type." 
- origin = typing._type_check(params[0], msg) - metadata = tuple(params[1:]) - return _AnnotatedAlias(origin, metadata) - - def __init_subclass__(cls, *args, **kwargs): - raise TypeError( - f"Cannot subclass {cls.__module__}.Annotated" - ) +if hasattr(typing, "Required"): + get_type_hints = typing.get_type_hints +else: + import functools + import types - def _strip_annotations(t): - """Strips the annotations from a given type. - """ + # replaces _strip_annotations() + def _strip_extras(t): + """Strips Annotated, Required and NotRequired from a given type.""" if isinstance(t, _AnnotatedAlias): - return _strip_annotations(t.__origin__) + return _strip_extras(t.__origin__) + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): + return _strip_extras(t.__args__[0]) if isinstance(t, typing._GenericAlias): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + stripped_args = tuple(_strip_extras(a) for a in t.__args__) if stripped_args == t.__args__: return t - res = t.copy_with(stripped_args) - res._special = t._special - return res + return t.copy_with(stripped_args) + if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return types.GenericAlias(t.__origin__, stripped_args) + if hasattr(types, "UnionType") and isinstance(t, types.UnionType): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return functools.reduce(operator.or_, stripped_args) + return t def get_type_hints(obj, globalns=None, localns=None, include_extras=False): @@ -1244,7 +906,8 @@ def get_type_hints(obj, globalns=None, localns=None, include_extras=False): This is often the same as obj.__annotations__, but it handles forward references encoded as string literals, adds Optional[t] if a default value equal to None is set and recursively replaces all - 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). + 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' + (unless 'include_extras=True'). The argument may be a module, class, method, or function. The annotations are returned as a dictionary. For classes, annotations include also @@ -1269,120 +932,65 @@ def get_type_hints(obj, globalns=None, localns=None, include_extras=False): - If two dict arguments are passed, they specify globals and locals, respectively. """ - hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if hasattr(typing, "Annotated"): + hint = typing.get_type_hints( + obj, globalns=globalns, localns=localns, include_extras=True + ) + else: + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) if include_extras: return hint - return {k: _strip_annotations(t) for k, t in hint.items()} -# 3.6 -else: - - def _is_dunder(name): - """Returns True if name is a __dunder_variable_name__.""" - return len(name) > 4 and name.startswith('__') and name.endswith('__') - - # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality - # checks, argument expansion etc. are done on the _subs_tre. As a result we - # can't provide a get_type_hints function that strips out annotations. 
+ return {k: _strip_extras(t) for k, t in hint.items()} - class AnnotatedMeta(typing.GenericMeta): - """Metaclass for Annotated""" - def __new__(cls, name, bases, namespace, **kwargs): - if any(b is not object for b in bases): - raise TypeError("Cannot subclass " + str(Annotated)) - return super().__new__(cls, name, bases, namespace, **kwargs) +# Python 3.9+ has PEP 593 (Annotated) +if hasattr(typing, 'Annotated'): + Annotated = typing.Annotated + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +# 3.7-3.8 +else: + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. - @property - def __metadata__(self): - return self._subs_tree()[2] + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. + """ + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata - def _tree_repr(self, tree): - cls, origin, metadata = tree - if not isinstance(origin, tuple): - tp_repr = typing._type_repr(origin) - else: - tp_repr = origin[0]._tree_repr(origin) - metadata_reprs = ", ".join(repr(arg) for arg in metadata) - return f'{cls}[{tp_repr}, {metadata_reprs}]' - - def _subs_tree(self, tvars=None, args=None): # noqa - if self is Annotated: - return Annotated - res = super()._subs_tree(tvars=tvars, args=args) - # Flatten nested Annotated - if isinstance(res[1], tuple) and res[1][0] is Annotated: - sub_tp = res[1][1] - sub_annot = res[1][2] - return (Annotated, sub_tp, sub_annot + res[2]) - return res + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) - def _get_cons(self): - """Return the class used to create instance of this type.""" - if self.__origin__ is None: - raise TypeError("Cannot get the underlying type of a " - "non-specialized Annotated type.") - tree = self._subs_tree() - while isinstance(tree, tuple) and tree[0] is Annotated: - tree = tree[1] - if isinstance(tree, tuple): - return tree[0] - else: - return tree + def __repr__(self): + return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " + f"{', '.join(repr(a) for a in self.__metadata__)}]") - @typing._tp_cache - def __getitem__(self, params): - if not isinstance(params, tuple): - params = (params,) - if self.__origin__ is not None: # specializing an instantiated type - return super().__getitem__(params) - elif not isinstance(params, tuple) or len(params) < 2: - raise TypeError("Annotated[...] should be instantiated " - "with at least two arguments (a type and an " - "annotation).") - else: - msg = "Annotated[t, ...]: t must be a type." - tp = typing._type_check(params[0], msg) - metadata = tuple(params[1:]) - return self.__class__( - self.__name__, - self.__bases__, - _no_slots_copy(self.__dict__), - tvars=_type_vars((tp,)), - # Metadata is a tuple so it won't be touched by _replace_args et al. 
- args=(tp, metadata), - origin=self, + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ ) - def __call__(self, *args, **kwargs): - cons = self._get_cons() - result = cons(*args, **kwargs) - try: - result.__orig_class__ = self - except AttributeError: - pass - return result - - def __getattr__(self, attr): - # For simplicity we just don't relay all dunder names - if self.__origin__ is not None and not _is_dunder(attr): - return getattr(self._get_cons(), attr) - raise AttributeError(attr) - - def __setattr__(self, attr, value): - if _is_dunder(attr) or attr.startswith('_abc_'): - super().__setattr__(attr, value) - elif self.__origin__ is None: - raise AttributeError(attr) - else: - setattr(self._get_cons(), attr, value) - - def __instancecheck__(self, obj): - raise TypeError("Annotated cannot be used with isinstance().") + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ - def __subclasscheck__(self, cls): - raise TypeError("Annotated cannot be used with issubclass().") + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) - class Annotated(metaclass=AnnotatedMeta): + class Annotated: """Add context specific metadata to a type. Example: Annotated[int, runtime_check.Unsigned] indicates to the @@ -1390,8 +998,9 @@ class Annotated(metaclass=AnnotatedMeta): Every other consumer of this type can ignore this metadata and treat this type as int. - The first argument to Annotated must be a valid type, the remaining - arguments are kept as a tuple in the __metadata__ field. + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __extra__ field. Details: @@ -1414,6 +1023,31 @@ class Annotated(metaclass=AnnotatedMeta): OptimizedList[int] == Annotated[List[int], runtime.Optimize()] """ + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + allowed_special_forms = (ClassVar, Final) + if get_origin(params[0]) in allowed_special_forms: + origin = params[0] + else: + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + f"Cannot subclass {cls.__module__}.Annotated" + ) + # Python 3.8 has get_origin() and get_args() but those implementations aren't # Annotated-aware, so we can't use those. Python 3.9's versions don't support # ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. 
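
As the comment above notes, only Python 3.10's get_origin()/get_args() handle every form this module cares about, so the backport in the next hunk normalizes the Annotated and ParamSpec cases itself. A quick sketch of the contract it preserves (Meters is an illustrative alias):

from typing import List
from typing_extensions import Annotated, get_origin, get_args

Meters = Annotated[int, "unit:m"]

assert get_origin(Meters) is Annotated
assert get_args(Meters) == (int, "unit:m")

# Ordinary aliases behave as they do on 3.10+:
assert get_origin(List[int]) is list
assert get_args(List[int]) == (int,)
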
@@ -1421,7 +1055,7 @@ class Annotated(metaclass=AnnotatedMeta): get_origin = typing.get_origin get_args = typing.get_args # 3.7-3.9 -elif PEP_560: +else: try: # 3.9+ from typing import _BaseGenericAlias @@ -1429,9 +1063,9 @@ class Annotated(metaclass=AnnotatedMeta): _BaseGenericAlias = typing._GenericAlias try: # 3.9+ - from typing import GenericAlias + from typing import GenericAlias as _typing_GenericAlias except ImportError: - GenericAlias = typing._GenericAlias + _typing_GenericAlias = typing._GenericAlias def get_origin(tp): """Get the unsubscripted version of a type. @@ -1450,7 +1084,7 @@ def get_origin(tp): """ if isinstance(tp, _AnnotatedAlias): return Annotated - if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias, + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, ParamSpecArgs, ParamSpecKwargs)): return tp.__origin__ if tp is typing.Generic: @@ -1470,7 +1104,7 @@ def get_args(tp): """ if isinstance(tp, _AnnotatedAlias): return (tp.__origin__,) + tp.__metadata__ - if isinstance(tp, (typing._GenericAlias, GenericAlias)): + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): if getattr(tp, "_special", False): return () res = tp.__args__ @@ -1503,7 +1137,7 @@ def TypeAlias(self, parameters): """ raise TypeError(f"{self} is not subscriptable") # 3.7-3.8 -elif sys.version_info[:2] >= (3, 7): +else: class _TypeAliasForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @@ -1519,44 +1153,51 @@ def __repr__(self): It's invalid when used anywhere except as in the example above.""") -# 3.6 -else: - class _TypeAliasMeta(typing.TypingMeta): - """Metaclass for TypeAlias""" - def __repr__(self): - return 'typing_extensions.TypeAlias' - class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True): - """Special marker indicating that an assignment should - be recognized as a proper type alias definition by type - checkers. +class _DefaultMixin: + """Mixin for TypeVarLike defaults.""" - For example:: + __slots__ = () - Predicate: TypeAlias = Callable[..., bool] + def __init__(self, default): + if isinstance(default, (tuple, list)): + self.__default__ = tuple((typing._type_check(d, "Default must be a type") + for d in default)) + elif default != _marker: + self.__default__ = typing._type_check(default, "Default must be a type") + else: + self.__default__ = None - It's invalid when used anywhere except as in the example above. 
- """ - __slots__ = () - def __instancecheck__(self, obj): - raise TypeError("TypeAlias cannot be used with isinstance().") +# Add default and infer_variance parameters from PEP 696 and 695 +class TypeVar(typing.TypeVar, _DefaultMixin, _root=True): + """Type variable.""" - def __subclasscheck__(self, cls): - raise TypeError("TypeAlias cannot be used with issubclass().") + __module__ = 'typing' - def __repr__(self): - return 'typing_extensions.TypeAlias' + def __init__(self, name, *constraints, bound=None, + covariant=False, contravariant=False, + default=_marker, infer_variance=False): + super().__init__(name, *constraints, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + self.__infer_variance__ = infer_variance - TypeAlias = _TypeAliasBase(_root=True) + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod # Python 3.10+ has PEP 612 if hasattr(typing, 'ParamSpecArgs'): ParamSpecArgs = typing.ParamSpecArgs ParamSpecKwargs = typing.ParamSpecKwargs -# 3.6-3.9 +# 3.7-3.9 else: class _Immutable: """Mixin to indicate that object should not be copied.""" @@ -1586,6 +1227,11 @@ def __init__(self, origin): def __repr__(self): return f"{self.__origin__.__name__}.args" + def __eq__(self, other): + if not isinstance(other, ParamSpecArgs): + return NotImplemented + return self.__origin__ == other.__origin__ + class ParamSpecKwargs(_Immutable): """The kwargs for a ParamSpec object. @@ -1604,14 +1250,39 @@ def __init__(self, origin): def __repr__(self): return f"{self.__origin__.__name__}.kwargs" + def __eq__(self, other): + if not isinstance(other, ParamSpecKwargs): + return NotImplemented + return self.__origin__ == other.__origin__ + # 3.10+ if hasattr(typing, 'ParamSpec'): - ParamSpec = typing.ParamSpec -# 3.6-3.9 + + # Add default Parameter - PEP 696 + class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True): + """Parameter specification variable.""" + + __module__ = 'typing' + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=_marker): + super().__init__(name, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + +# 3.7-3.9 else: # Inherits from list as a workaround for Callable checks in Python < 3.9.2. - class ParamSpec(list): + class ParamSpec(list, _DefaultMixin): """Parameter specification variable. Usage:: @@ -1669,7 +1340,8 @@ def args(self): def kwargs(self): return ParamSpecKwargs(self) - def __init__(self, name, *, bound=None, covariant=False, contravariant=False): + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=_marker): super().__init__([self]) self.__name__ = name self.__covariant__ = bool(covariant) @@ -1678,6 +1350,7 @@ def __init__(self, name, *, bound=None, covariant=False, contravariant=False): self.__bound__ = typing._type_check(bound, 'Bound must be a type.') else: self.__bound__ = None + _DefaultMixin.__init__(self, default) # for pickling: try: @@ -1709,28 +1382,17 @@ def __reduce__(self): def __call__(self, *args, **kwargs): pass - if not PEP_560: - # Only needed in 3.6. 
- def _get_type_vars(self, tvars): - if self not in tvars: - tvars.append(self) - -# 3.6-3.9 +# 3.7-3.9 if not hasattr(typing, 'Concatenate'): # Inherits from list as a workaround for Callable checks in Python < 3.9.2. class _ConcatenateGenericAlias(list): # Trick Generic into looking into this for __parameters__. - if PEP_560: - __class__ = typing._GenericAlias - else: - __class__ = typing._TypingBase + __class__ = typing._GenericAlias # Flag in 3.8. _special = False - # Attribute in 3.6 and earlier. - _gorg = typing.Generic def __init__(self, origin, args): super().__init__(args) @@ -1755,14 +1417,8 @@ def __parameters__(self): tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) ) - if not PEP_560: - # Only required in 3.6. - def _get_type_vars(self, tvars): - if self.__origin__ and self.__parameters__: - typing._get_type_vars(self.__parameters__, tvars) - -# 3.6-3.9 +# 3.7-3.9 @typing._tp_cache def _concatenate_getitem(self, parameters): if parameters == (): @@ -1797,7 +1453,7 @@ def Concatenate(self, parameters): """ return _concatenate_getitem(self, parameters) # 3.7-8 -elif sys.version_info[:2] >= (3, 7): +else: class _ConcatenateForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @@ -1817,42 +1473,6 @@ def __getitem__(self, parameters): See PEP 612 for detailed information. """) -# 3.6 -else: - class _ConcatenateAliasMeta(typing.TypingMeta): - """Metaclass for Concatenate.""" - - def __repr__(self): - return 'typing_extensions.Concatenate' - - class _ConcatenateAliasBase(typing._FinalTypingBase, - metaclass=_ConcatenateAliasMeta, - _root=True): - """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a - higher order function which adds, removes or transforms parameters of a - callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """ - __slots__ = () - - def __instancecheck__(self, obj): - raise TypeError("Concatenate cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError("Concatenate cannot be used with issubclass().") - - def __repr__(self): - return 'typing_extensions.Concatenate' - - def __getitem__(self, parameters): - return _concatenate_getitem(self, parameters) - - Concatenate = _ConcatenateAliasBase(_root=True) # 3.10+ if hasattr(typing, 'TypeGuard'): @@ -1907,10 +1527,10 @@ def is_str(val: Union[str, float]): ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). """ - item = typing._type_check(parameters, f'{self} accepts only single type.') + item = typing._type_check(parameters, f'{self} accepts only a single type.') return typing._GenericAlias(self, (item,)) # 3.7-3.8 -elif sys.version_info[:2] >= (3, 7): +else: class _TypeGuardForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -1965,135 +1585,78 @@ def is_str(val: Union[str, float]): ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). """) -# 3.6 -else: - class _TypeGuard(typing._FinalTypingBase, _root=True): - """Special typing form used to annotate the return type of a user-defined - type guard function. ``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. 
Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type guard". - Sometimes it would be convenient to use a user-defined boolean function - as a type guard. Such a function should use ``TypeGuard[...]`` as its - return type to alert static type checkers to this intention. +# Vendored from cpython typing._SpecialFrom +class _SpecialForm(typing._Final, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') - Using ``-> TypeGuard`` tells the static type checker that for a given - function: + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is the type inside ``TypeGuard``. - - For example:: - - def is_str(val: Union[str, float]): - # "isinstance" type guard - if isinstance(val, str): - # Type of ``val`` is narrowed to ``str`` - ... - else: - # Else, type of ``val`` is narrowed to ``float``. - ... + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``List[object]`` to ``List[str]`` even though the latter is not - a subtype of the former, since ``List`` is invariant. The responsibility of - writing type-safe type guards is left to the user. + raise AttributeError(item) - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). - """ + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") - __slots__ = ('__type__',) + def __repr__(self): + return f'typing_extensions.{self._name}' - def __init__(self, tp=None, **kwds): - self.__type__ = tp + def __reduce__(self): + return self._name - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - f'{cls.__name__[1:]} accepts only a single type.'), - _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') + def __call__(self, *args, **kwds): + raise TypeError(f"Cannot instantiate {self!r}") - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) + def __or__(self, other): + return typing.Union[self, other] - def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += f'[{typing._type_repr(self.__type__)}]' - return r + def __ror__(self, other): + return typing.Union[other, self] - def __hash__(self): - return hash((type(self).__name__, self.__type__)) + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance()") - def __eq__(self, other): - if not isinstance(other, _TypeGuard): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ - return self is other + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass()") - TypeGuard = _TypeGuard(_root=True) + @typing._tp_cache + def __getitem__(self, parameters): + return self._getitem(self, parameters) -if hasattr(typing, "Self"): - Self = typing.Self -elif sys.version_info[:2] >= (3, 7): - # Vendored from cpython 
typing._SpecialFrom - class _SpecialForm(typing._Final, _root=True): - __slots__ = ('_name', '__doc__', '_getitem') - def __init__(self, getitem): - self._getitem = getitem - self._name = getitem.__name__ - self.__doc__ = getitem.__doc__ - - def __getattr__(self, item): - if item in {'__name__', '__qualname__'}: - return self._name - - raise AttributeError(item) - - def __mro_entries__(self, bases): - raise TypeError(f"Cannot subclass {self!r}") - - def __repr__(self): - return f'typing_extensions.{self._name}' +if hasattr(typing, "LiteralString"): + LiteralString = typing.LiteralString +else: + @_SpecialForm + def LiteralString(self, params): + """Represents an arbitrary literal string. - def __reduce__(self): - return self._name + Example:: - def __call__(self, *args, **kwds): - raise TypeError(f"Cannot instantiate {self!r}") + from typing_extensions import LiteralString - def __or__(self, other): - return typing.Union[self, other] + def query(sql: LiteralString) -> ...: + ... - def __ror__(self, other): - return typing.Union[other, self] + query("SELECT * FROM table") # ok + query(f"SELECT * FROM {input()}") # not ok - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance()") + See PEP 675 for details. - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass()") + """ + raise TypeError(f"{self} is not subscriptable") - @typing._tp_cache - def __getitem__(self, parameters): - return self._getitem(self, parameters) +if hasattr(typing, "Self"): + Self = typing.Self +else: @_SpecialForm def Self(self, params): """Used to spell the type of "self" in classes. @@ -2110,30 +1673,36 @@ def parse(self, data: bytes) -> Self: """ raise TypeError(f"{self} is not subscriptable") -else: - class _Self(typing._FinalTypingBase, _root=True): - """Used to spell the type of "self" in classes. - Example:: - from typing import Self +if hasattr(typing, "Never"): + Never = typing.Never +else: + @_SpecialForm + def Never(self, params): + """The bottom type, a type that has no members. - class ReturnsSelf: - def parse(self, data: bytes) -> Self: - ... - return self + This can be used to define a function that should never be + called, or a function that never returns:: - """ + from typing_extensions import Never - __slots__ = () + def never_call_me(arg: Never) -> None: + pass - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance().") + def int_or_str(arg: int | str) -> None: + never_call_me(arg) # type checker error + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + never_call_me(arg) # ok, arg is of type Never - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass().") + """ - Self = _Self(_root=True) + raise TypeError(f"{self} is not subscriptable") if hasattr(typing, 'Required'): @@ -2161,7 +1730,7 @@ class Movie(TypedDict, total=False): There is no runtime checking that a required key is actually provided when instantiating a related TypedDict. 
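
LiteralString, Self and Never above are pure markers: each is a _SpecialForm whose body only raises, because their meaning exists entirely for static checkers. A sketch of typical Self usage (Shape and Circle are illustrative names, not from this patch):

from typing_extensions import Self

class Shape:
    def set_scale(self, scale: float) -> Self:
        self.scale = scale
        return self

class Circle(Shape):
    def set_radius(self, radius: float) -> Self:
        self.radius = radius
        return self

# Self lets the chained call below type-check: set_scale() on a
# Circle is inferred to return Circle, not the base Shape.
Circle().set_scale(0.5).set_radius(2.7)
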
""" - item = typing._type_check(parameters, f'{self._name} accepts only single type') + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) @_ExtensionsSpecialForm @@ -2178,17 +1747,17 @@ class Movie(TypedDict): year=1999, ) """ - item = typing._type_check(parameters, f'{self._name} accepts only single type') + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) -elif sys.version_info[:2] >= (3, 7): +else: class _RequiredForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): item = typing._type_check(parameters, - '{} accepts only single type'.format(self._name)) + f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) Required = _RequiredForm( @@ -2222,75 +1791,522 @@ class Movie(TypedDict): year=1999, ) """) + + +if hasattr(typing, "Unpack"): # 3.11+ + Unpack = typing.Unpack +elif sys.version_info[:2] >= (3, 9): + class _UnpackSpecialForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + @_UnpackSpecialForm + def Unpack(self, parameters): + """A special typing construct to unpack a variadic type. For example: + + Shape = TypeVarTuple('Shape') + Batch = NewType('Batch', int) + + def add_batch_axis( + x: Array[Unpack[Shape]] + ) -> Array[Batch, Unpack[Shape]]: ... + + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + else: - # NOTE: Modeled after _Final's implementation when _FinalTypingBase available - class _MaybeRequired(typing._FinalTypingBase, _root=True): - __slots__ = ('__type__',) - - def __init__(self, tp=None, **kwds): - self.__type__ = tp - - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - '{} accepts only single type.'.format(cls.__name__[1:])), - _root=True) - raise TypeError('{} cannot be further subscripted' - .format(cls.__name__[1:])) - - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + class _UnpackForm(typing._SpecialForm, _root=True): def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += '[{}]'.format(typing._type_repr(self.__type__)) - return r + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + Unpack = _UnpackForm( + 'Unpack', + doc="""A special typing construct to unpack a variadic type. For example: + + Shape = TypeVarTuple('Shape') + Batch = NewType('Batch', int) + + def add_batch_axis( + x: Array[Unpack[Shape]] + ) -> Array[Batch, Unpack[Shape]]: ... 
+ + """) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + + +if hasattr(typing, "TypeVarTuple"): # 3.11+ + + # Add default Parameter - PEP 696 + class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True): + """Type variable tuple.""" + + def __init__(self, name, *, default=_marker): + super().__init__(name) + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + +else: + class TypeVarTuple(_DefaultMixin): + """Type variable tuple. + + Usage:: + + Ts = TypeVarTuple('Ts') + + In the same way that a normal type variable is a stand-in for a single + type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* + type such as ``Tuple[int, str]``. + + Type variable tuples can be used in ``Generic`` declarations. + Consider the following example:: + + class Array(Generic[*Ts]): ... + + The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``, + where ``T1`` and ``T2`` are type variables. To use these type variables + as type parameters of ``Array``, we must *unpack* the type variable tuple using + the star operator: ``*Ts``. The signature of ``Array`` then behaves + as if we had simply written ``class Array(Generic[T1, T2]): ...``. + In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows + us to parameterise the class with an *arbitrary* number of type parameters. + + Type variable tuples can be used anywhere a normal ``TypeVar`` can. + This includes class definitions, as shown above, as well as function + signatures and variable annotations:: + + class Array(Generic[*Ts]): + + def __init__(self, shape: Tuple[*Ts]): + self._shape: Tuple[*Ts] = shape + + def get_shape(self) -> Tuple[*Ts]: + return self._shape + + shape = (Height(480), Width(640)) + x: Array[Height, Width] = Array(shape) + y = abs(x) # Inferred type is Array[Height, Width] + z = x + x # ... is Array[Height, Width] + x.get_shape() # ... is tuple[Height, Width] + + """ + + # Trick Generic __parameters__. + __class__ = typing.TypeVar + + def __iter__(self): + yield self.__unpacked__ + + def __init__(self, name, *, default=_marker): + self.__name__ = name + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + self.__unpacked__ = Unpack[self] + + def __repr__(self): + return self.__name__ def __hash__(self): - return hash((type(self).__name__, self.__type__)) + return object.__hash__(self) def __eq__(self, other): - if not isinstance(other, type(self)): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ return self is other - class _Required(_MaybeRequired, _root=True): - """A special typing construct to mark a key of a total=False TypedDict - as required. For example: + def __reduce__(self): + return self.__name__ - class Movie(TypedDict, total=False): - title: Required[str] - year: int + def __init_subclass__(self, *args, **kwds): + if '_root' not in kwds: + raise TypeError("Cannot subclass special typing classes") - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. 
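
Unpack and TypeVarTuple above are the variadic-generics backport (PEP 646): Unpack masquerades as a TypeVar so that Generic accepts it, while the monkey-patched _check_generic further down keeps it from being counted as an ordinary parameter. A sketch of the spelling this enables on pre-3.11 interpreters, reusing the Array example from the docstring (constructor body is illustrative):

from typing import Generic
from typing_extensions import TypeVarTuple, Unpack

Ts = TypeVarTuple("Ts")

class Array(Generic[Unpack[Ts]]):
    def __init__(self, *shape):
        self.shape = shape

# The class accepts any number of dimension type parameters.
x: Array[int, int] = Array(480, 640)
y: Array[int, int, int] = Array(3, 480, 640)
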
+if hasattr(typing, "reveal_type"): + reveal_type = typing.reveal_type +else: + def reveal_type(__obj: T) -> T: + """Reveal the inferred type of a variable. + + When a static type checker encounters a call to ``reveal_type()``, + it will emit the inferred type of the argument:: + + x: int = 1 + reveal_type(x) + + Running a static type checker (e.g., ``mypy``) on this example + will produce output similar to 'Revealed type is "builtins.int"'. + + At runtime, the function prints the runtime type of the + argument and returns it unchanged. + """ + print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr) + return __obj - class _NotRequired(_MaybeRequired, _root=True): - """A special typing construct to mark a key of a TypedDict as - potentially missing. For example: - class Movie(TypedDict): - title: str - year: NotRequired[int] +if hasattr(typing, "assert_never"): + assert_never = typing.assert_never +else: + def assert_never(__arg: Never) -> Never: + """Assert to the type checker that a line of code is unreachable. + + Example:: + + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + assert_never(arg) + + If a type checker finds that a call to assert_never() is + reachable, it will emit an error. + + At runtime, this throws an exception when called. + + """ + raise AssertionError("Expected code to be unreachable") + + +if sys.version_info >= (3, 12): + # dataclass_transform exists in 3.11 but lacks the frozen_default parameter + dataclass_transform = typing.dataclass_transform +else: + def dataclass_transform( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + frozen_default: bool = False, + field_specifiers: typing.Tuple[ + typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], + ... + ] = (), + **kwargs: typing.Any, + ) -> typing.Callable[[T], T]: + """Decorator that marks a function, class, or metaclass as providing + dataclass-like behavior. + + Example: + + from typing_extensions import dataclass_transform + + _T = TypeVar("_T") + + # Used on a decorator function + @dataclass_transform() + def create_model(cls: type[_T]) -> type[_T]: + ... + return cls + + @create_model + class CustomerModel: + id: int + name: str + + # Used on a base class + @dataclass_transform() + class ModelBase: ... + + class CustomerModel(ModelBase): + id: int + name: str + + # Used on a metaclass + @dataclass_transform() + class ModelMeta(type): ... + + class ModelBase(metaclass=ModelMeta): ... + + class CustomerModel(ModelBase): + id: int + name: str + + Each of the ``CustomerModel`` classes defined in this example will now + behave similarly to a dataclass created with the ``@dataclasses.dataclass`` + decorator. For example, the type checker will synthesize an ``__init__`` + method. + + The arguments to this decorator can be used to customize this behavior: + - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be + True or False if it is omitted by the caller. + - ``order_default`` indicates whether the ``order`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``kw_only_default`` indicates whether the ``kw_only`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``frozen_default`` indicates whether the ``frozen`` parameter is + assumed to be True or False if it is omitted by the caller. 
+ - ``field_specifiers`` specifies a static list of supported classes + or functions that describe fields, similar to ``dataclasses.field()``. + + At runtime, this decorator records its arguments in the + ``__dataclass_transform__`` attribute on the decorated object. + + See PEP 681 for details. + + """ + def decorator(cls_or_fn): + cls_or_fn.__dataclass_transform__ = { + "eq_default": eq_default, + "order_default": order_default, + "kw_only_default": kw_only_default, + "frozen_default": frozen_default, + "field_specifiers": field_specifiers, + "kwargs": kwargs, + } + return cls_or_fn + return decorator + + +if hasattr(typing, "override"): + override = typing.override +else: + _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any]) + + def override(__arg: _F) -> _F: + """Indicate that a method is intended to override a method in a base class. + + Usage: + + class Base: + def method(self) -> None: ... + pass + + class Child(Base): + @override + def method(self) -> None: + super().method() + + When this decorator is applied to a method, the type checker will + validate that it overrides a method with the same name on a base class. + This helps prevent bugs that may occur when a base class is changed + without an equivalent change to a child class. + + There is no runtime checking of these properties. The decorator + sets the ``__override__`` attribute to ``True`` on the decorated object + to allow runtime introspection. + + See PEP 698 for details. + + """ + try: + __arg.__override__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return __arg + + +if hasattr(typing, "deprecated"): + deprecated = typing.deprecated +else: + _T = typing.TypeVar("_T") + + def deprecated( + __msg: str, + *, + category: typing.Optional[typing.Type[Warning]] = DeprecationWarning, + stacklevel: int = 1, + ) -> typing.Callable[[_T], _T]: + """Indicate that a class, function or overload is deprecated. + + Usage: + + @deprecated("Use B instead") + class A: + pass + + @deprecated("Use g instead") + def f(): + pass + + @overload + @deprecated("int support is deprecated") + def g(x: int) -> int: ... + @overload + def g(x: str) -> int: ... + + When this decorator is applied to an object, the type checker + will generate a diagnostic on usage of the deprecated object. + + No runtime warning is issued. The decorator sets the ``__deprecated__`` + attribute on the decorated object to the deprecation message + passed to the decorator. If applied to an overload, the decorator + must be after the ``@overload`` decorator for the attribute to + exist on the overload as returned by ``get_overloads()``. + + See PEP 702 for details. - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) """ + def decorator(__arg: _T) -> _T: + if category is None: + __arg.__deprecated__ = __msg + return __arg + elif isinstance(__arg, type): + original_new = __arg.__new__ + has_init = __arg.__init__ is not object.__init__ + + @functools.wraps(original_new) + def __new__(cls, *args, **kwargs): + warnings.warn(__msg, category=category, stacklevel=stacklevel + 1) + # Mirrors a similar check in object.__new__. 
+ if not has_init and (args or kwargs): + raise TypeError(f"{cls.__name__}() takes no arguments") + if original_new is not object.__new__: + return original_new(cls, *args, **kwargs) + else: + return original_new(cls) + + __arg.__new__ = staticmethod(__new__) + __arg.__deprecated__ = __new__.__deprecated__ = __msg + return __arg + elif callable(__arg): + @functools.wraps(__arg) + def wrapper(*args, **kwargs): + warnings.warn(__msg, category=category, stacklevel=stacklevel + 1) + return __arg(*args, **kwargs) + + __arg.__deprecated__ = wrapper.__deprecated__ = __msg + return wrapper + else: + raise TypeError( + "@deprecated decorator with non-None category must be applied to " + f"a class or callable, not {__arg!r}" + ) + + return decorator + + +# We have to do some monkey patching to deal with the dual nature of +# Unpack/TypeVarTuple: +# - We want Unpack to be a kind of TypeVar so it gets accepted in +# Generic[Unpack[Ts]] +# - We want it to *not* be treated as a TypeVar for the purposes of +# counting generic parameters, so that when we subscript a generic, +# the runtime doesn't try to substitute the Unpack with the subscripted type. +if not hasattr(typing, "TypeVarTuple"): + typing._collect_type_vars = _collect_type_vars + typing._check_generic = _check_generic + + +# Backport typing.NamedTuple as it exists in Python 3.11. +# In 3.11, the ability to define generic `NamedTuple`s was supported. +# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8. +if sys.version_info >= (3, 11): + NamedTuple = typing.NamedTuple +else: + def _caller(): + try: + return sys._getframe(2).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): # For platforms without _getframe() + return None + + def _make_nmtuple(name, types, module, defaults=()): + fields = [n for n, t in types] + annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") + for n, t in types} + nm_tpl = collections.namedtuple(name, fields, + defaults=defaults, module=module) + nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations + # The `_field_types` attribute was removed in 3.9; + # in earlier versions, it is the same as the `__annotations__` attribute + if sys.version_info < (3, 9): + nm_tpl._field_types = annotations + return nm_tpl + + _prohibited_namedtuple_fields = typing._prohibited + _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) + + class _NamedTupleMeta(type): + def __new__(cls, typename, bases, ns): + assert _NamedTuple in bases + for base in bases: + if base is not _NamedTuple and base is not typing.Generic: + raise TypeError( + 'can only inherit from a NamedTuple type and Generic') + bases = tuple(tuple if base is _NamedTuple else base for base in bases) + types = ns.get('__annotations__', {}) + default_names = [] + for field_name in types: + if field_name in ns: + default_names.append(field_name) + elif default_names: + raise TypeError(f"Non-default namedtuple field {field_name} " + f"cannot follow default field" + f"{'s' if len(default_names) > 1 else ''} " + f"{', '.join(default_names)}") + nm_tpl = _make_nmtuple( + typename, types.items(), + defaults=[ns[n] for n in default_names], + module=ns['__module__'] + ) + nm_tpl.__bases__ = bases + if typing.Generic in bases: + class_getitem = typing.Generic.__class_getitem__.__func__ + nm_tpl.__class_getitem__ = classmethod(class_getitem) + # update from user namespace without overriding special namedtuple attributes + for key in ns: + if key in 
_prohibited_namedtuple_fields: + raise AttributeError("Cannot overwrite NamedTuple attribute " + key) + elif key not in _special_namedtuple_fields and key not in nm_tpl._fields: + setattr(nm_tpl, key, ns[key]) + if typing.Generic in bases: + nm_tpl.__init_subclass__() + return nm_tpl + + def NamedTuple(__typename, __fields=None, **kwargs): + if __fields is None: + __fields = kwargs.items() + elif kwargs: + raise TypeError("Either list of fields or keywords" + " can be provided to NamedTuple, not both") + return _make_nmtuple(__typename, __fields, module=_caller()) + + NamedTuple.__doc__ = typing.NamedTuple.__doc__ + _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {}) + + # On 3.8+, alter the signature so that it matches typing.NamedTuple. + # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7, + # so just leave the signature as it is on 3.7. + if sys.version_info >= (3, 8): + NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)' + + def _namedtuple_mro_entries(bases): + assert NamedTuple in bases + return (_NamedTuple,) - Required = _Required(_root=True) - NotRequired = _NotRequired(_root=True) + NamedTuple.__mro_entries__ = _namedtuple_mro_entries diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktalib-2.2.6.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/LICENSE.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/LICENSE.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/LICENSE.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/LICENSE.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/METADATA similarity index 93% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/METADATA rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/METADATA index 20ceebf..5d2a438 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/METADATA +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: urllib3 -Version: 1.26.8 +Version: 1.26.15 Summary: HTTP library with thread-safe connection pooling, file post, and more. 
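
The NamedTuple backport above exists chiefly so that generic NamedTuple classes can be defined on Python < 3.11. A minimal sketch of what that enables, assuming this vendored typing_extensions is importable (``Point`` is a hypothetical class, not part of the add-on):

    from typing import Generic, TypeVar
    from typing_extensions import NamedTuple

    T = TypeVar("T")

    class Point(NamedTuple, Generic[T]):
        x: T
        y: T

    p = Point(1.0, 2.0)          # a type checker infers Point[float]
    assert isinstance(p, tuple)  # still a plain namedtuple at runtime
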
Home-page: https://urllib3.readthedocs.io/ Author: Andrey Petrov @@ -10,7 +10,6 @@ Project-URL: Documentation, https://urllib3.readthedocs.io/ Project-URL: Code, https://github.com/urllib3/urllib3 Project-URL: Issue tracker, https://github.com/urllib3/urllib3/issues Keywords: urllib httplib threadsafe filepost http https ssl pooling -Platform: UNKNOWN Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License @@ -19,7 +18,6 @@ Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 @@ -30,16 +28,19 @@ Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Internet :: WWW/HTTP Classifier: Topic :: Software Development :: Libraries -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4 +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* Description-Content-Type: text/x-rst License-File: LICENSE.txt Provides-Extra: brotli -Requires-Dist: brotlipy (>=0.6.0) ; extra == 'brotli' +Requires-Dist: brotlicffi (>=0.8.0) ; ((os_name != "nt" or python_version >= "3") and platform_python_implementation != "CPython") and extra == 'brotli' +Requires-Dist: brotli (>=1.0.9) ; ((os_name != "nt" or python_version >= "3") and platform_python_implementation == "CPython") and extra == 'brotli' +Requires-Dist: brotlipy (>=0.6.0) ; (os_name == "nt" and python_version < "3") and extra == 'brotli' Provides-Extra: secure Requires-Dist: pyOpenSSL (>=0.14) ; extra == 'secure' Requires-Dist: cryptography (>=1.3.4) ; extra == 'secure' Requires-Dist: idna (>=2.0.0) ; extra == 'secure' Requires-Dist: certifi ; extra == 'secure' +Requires-Dist: urllib3-secure-extra ; extra == 'secure' Requires-Dist: ipaddress ; (python_version == "2.7") and extra == 'secure' Provides-Extra: socks Requires-Dist: PySocks (!=1.5.7,<2.0,>=1.5.6) ; extra == 'socks' @@ -81,8 +82,10 @@ urllib3 can be installed with `pip `_:: Alternatively, you can grab the latest source code from `GitHub `_:: - $ git clone git://github.com/urllib3/urllib3.git - $ python setup.py install + $ git clone https://github.com/urllib3/urllib3.git + $ cd urllib3 + $ git checkout 1.26.x + $ pip install . Documentation @@ -151,10 +154,68 @@ For Enterprise Changes ======= +1.26.15 (2023-03-10) +-------------------- + +* Fix socket timeout value when ``HTTPConnection`` is reused (`#2645 `__) +* Remove "!" character from the unreserved characters in IPv6 Zone ID parsing + (`#2899 `__) +* Fix IDNA handling of '\x80' byte (`#2901 `__) + +1.26.14 (2023-01-11) +-------------------- + +* Fixed parsing of port 0 (zero) returning None, instead of 0. (`#2850 `__) +* Removed deprecated getheaders() calls in contrib module. + +1.26.13 (2022-11-23) +-------------------- + +* Deprecated the ``HTTPResponse.getheaders()`` and ``HTTPResponse.getheader()`` methods. +* Fixed an issue where parsing a URL with leading zeroes in the port would be rejected + even when the port number after removing the zeroes was valid. +* Fixed a deprecation warning when using cryptography v39.0.0. 
+* Removed the ``<4`` in the ``Requires-Python`` packaging metadata field. + + +1.26.12 (2022-08-22) +-------------------- + +* Deprecated the `urllib3[secure]` extra and the `urllib3.contrib.pyopenssl` module. + Both will be removed in v2.x. See this `GitHub issue `_ + for justification and info on how to migrate. + + +1.26.11 (2022-07-25) +-------------------- + +* Fixed an issue where reading more than 2 GiB in a call to ``HTTPResponse.read`` would + raise an ``OverflowError`` on Python 3.9 and earlier. + + +1.26.10 (2022-07-07) +-------------------- + +* Removed support for Python 3.5 +* Fixed an issue where a ``ProxyError`` recommending configuring the proxy as HTTP + instead of HTTPS could appear even when an HTTPS proxy wasn't configured. + + +1.26.9 (2022-03-16) +------------------- + +* Changed ``urllib3[brotli]`` extra to favor installing Brotli libraries that are still + receiving updates like ``brotli`` and ``brotlicffi`` instead of ``brotlipy``. + This change does not impact behavior of urllib3, only which dependencies are installed. +* Fixed a socket leaking when ``HTTPSConnection.connect()`` raises an exception. +* Fixed ``server_hostname`` being forwarded from ``PoolManager`` to ``HTTPConnectionPool`` + when requesting an HTTP URL. Should only be forwarded when requesting an HTTPS URL. + + 1.26.8 (2022-01-07) ------------------- -* Added extra message to``urllib3.exceptions.ProxyError`` when urllib3 detects that +* Added extra message to ``urllib3.exceptions.ProxyError`` when urllib3 detects that a proxy is configured to use HTTPS but the proxy itself appears to only use HTTP. * Added a mention of the size of the connection pool when discarding a connection due to the pool being full. * Added explicit support for Python 3.11. @@ -1409,5 +1470,3 @@ Changes ---------------- * First release. 
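
As the 1.26.13 entry above notes, ``HTTPResponse.getheaders()`` and ``HTTPResponse.getheader()`` are deprecated in favor of the ``headers`` attribute. A minimal migration sketch, assuming a default ``PoolManager``:

    import urllib3

    http = urllib3.PoolManager()
    r = http.request("GET", "https://example.com/")

    # Deprecated (warns in 1.26.13+, removal planned for v2.1.0):
    #   r.getheaders()              -> dict(r.headers)
    #   r.getheader("Retry-After")  -> r.headers.get("Retry-After")
    retry_after = r.headers.get("Retry-After")
    all_headers = dict(r.headers)
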
- - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/RECORD new file mode 100644 index 0000000..176c67f --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/RECORD @@ -0,0 +1,44 @@ +urllib3-1.26.15.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +urllib3-1.26.15.dist-info/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 +urllib3-1.26.15.dist-info/METADATA,sha256=blT5BzPICGt1qpMvC6GdfIBUNNMobmh1bXPyuOQmq30,48125 +urllib3-1.26.15.dist-info/RECORD,, +urllib3-1.26.15.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110 +urllib3-1.26.15.dist-info/top_level.txt,sha256=EMiXL2sKrTcmrMxIHTqdc3ET54pQI2Y072LexFEemvo,8 +urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333 +urllib3/_collections.py,sha256=Rp1mVyBgc_UlAcp6M3at1skJBXR5J43NawRTvW2g_XY,10811 +urllib3/_version.py,sha256=vFwhFPO1DTzD8xawsdSDwriGSheS7LurJQL9fSgM_IM,64 +urllib3/connection.py,sha256=92k9td_y4PEiTIjNufCUa1NzMB3J3w0LEdyokYgXnW8,20300 +urllib3/connectionpool.py,sha256=u7I7TzJTsicVoNjGeZkCD5LANp_GCeDNBwXZoGHHVLo,39128 +urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 +urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 +urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 +urllib3/contrib/appengine.py,sha256=6IBW6lPOoVUxASPwtn6IH1AATe5DK3lLJCfwyWlLKAE,11012 +urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528 +urllib3/contrib/pyopenssl.py,sha256=4AJAlo9NmjWofY4dJwRa4kbZuRuHfNJxu8Pv6yQk1ss,17055 +urllib3/contrib/securetransport.py,sha256=QOhVbWrFQTKbmV-vtyG69amekkKVxXkdjk9oymaO0Ag,34416 +urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 +urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 +urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 +urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 +urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 +urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665 +urllib3/poolmanager.py,sha256=0KOOJECoeLYVjUHvv-0h4Oq3FFQQ2yb-Fnjkbj8gJO0,19786 +urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985 +urllib3/response.py,sha256=UPgLmnHj4z71ZnH8ivYOyncATifTOw9FQukUqDnckCc,30761 +urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 +urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 +urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 +urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 +urllib3/util/request.py,sha256=fWiAaa8pwdLLIqoTLBxCC2e4ed80muzKU3e3HWWTzFQ,4225 +urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 
+urllib3/util/retry.py,sha256=4laWh0HpwGijLiBmdBIYtbhYekQnNzzhx2W9uys0RHA,22003 +urllib3/util/ssl_.py,sha256=c0sYiSC6272r6uPkxQpo5rYPP9QC1eR6oI7004gYqZo,17165 +urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758 +urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895 +urllib3/util/timeout.py,sha256=cwq4dMk87mJHSBktK1miYJ-85G-3T3RmT20v7SFCpno,10168 +urllib3/util/url.py,sha256=kMxL1k0d-aQm_iZDw_zMmnyYyjrIA_DbsMy3cm3V55M,14279 +urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/WHEEL similarity index 70% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/WHEEL rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/WHEEL index 0b18a28..9d8f872 100644 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/attrs-21.4.0.dist-info/WHEEL +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) +Generator: bdist_wheel (0.38.4) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.15.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/RECORD deleted file mode 100644 index 81dbb05..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/RECORD +++ /dev/null @@ -1,44 +0,0 @@ -urllib3-1.26.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -urllib3-1.26.8.dist-info/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115 -urllib3-1.26.8.dist-info/METADATA,sha256=NhgvW-qlqWyF6-wiibRDgykddUAmma-JFMe0Kq_m8-U,45426 -urllib3-1.26.8.dist-info/RECORD,, -urllib3-1.26.8.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 -urllib3-1.26.8.dist-info/top_level.txt,sha256=EMiXL2sKrTcmrMxIHTqdc3ET54pQI2Y072LexFEemvo,8 -urllib3/__init__.py,sha256=j3yzHIbmW7CS-IKQJ9-PPQf_YKO8EOAey_rMW0UR7us,2763 -urllib3/_collections.py,sha256=Rp1mVyBgc_UlAcp6M3at1skJBXR5J43NawRTvW2g_XY,10811 -urllib3/_version.py,sha256=_NdMUQaeBvFHAX2z3zAIX2Wum58A6rVtY1f7ByHsQ4g,63 -urllib3/connection.py,sha256=6zokyboYYKm9VkyrQvVVLgxMyCZK7n9Vmg_2ZK6pbhc,20076 -urllib3/connectionpool.py,sha256=qz-ICrW6g4TZVCbDQ8fRe68BMpXkskkR9vAVY9zUWtA,39013 
-urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 -urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 -urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 -urllib3/contrib/appengine.py,sha256=jz515jZYBDFTnhR4zqfeaCo6JdDgAQqYbqzHK9sDkfw,11010 -urllib3/contrib/ntlmpool.py,sha256=ej9gGvfAb2Gt00lafFp45SIoRz-QwrQ4WChm6gQmAlM,4538 -urllib3/contrib/pyopenssl.py,sha256=YIMyTiXiLPV_QfFw3PjZ31mGqJmM5EzxIjhSLxZ7VUM,16874 -urllib3/contrib/securetransport.py,sha256=izdx43gFoUGFSgxasZlOCL42FaM4vSsAVTmhO0EH1vM,34417 -urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 -urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 -urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 -urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 -urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 -urllib3/packages/six.py,sha256=1LVW7ljqRirFlfExjwl-v1B7vSAUNTmzGMs-qays2zg,34666 -urllib3/poolmanager.py,sha256=whzlX6UTEgODMOCy0ZDMUONRBCz5wyIM8Z9opXAY-Lk,19763 -urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985 -urllib3/response.py,sha256=hGhGBh7TkEkh_IQg5C1W_xuPNrgIKv5BUXPyE-q0LuE,28203 -urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 -urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 -urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 -urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 -urllib3/util/request.py,sha256=NnzaEKQ1Pauw5MFMV6HmgEMHITf0Aua9fQuzi2uZzGc,4123 -urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 -urllib3/util/retry.py,sha256=iESg2PvViNdXBRY4MpL4h0kqwOOkHkxmLn1kkhFHPU8,22001 -urllib3/util/ssl_.py,sha256=c0sYiSC6272r6uPkxQpo5rYPP9QC1eR6oI7004gYqZo,17165 -urllib3/util/ssl_match_hostname.py,sha256=w01jCYuwvQ038p9mhc1P1gF8IiTN1qHakThpoukOlbw,5751 -urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895 -urllib3/util/timeout.py,sha256=QSbBUNOB9yh6AnDn61SrLQ0hg5oz0I9-uXEG91AJuIg,10003 -urllib3/util/url.py,sha256=au9jkUMnVr9Qp_9kg4HfZx9q9ur6yXQ4u5M17In-UKY,14030 -urllib3/util/wait.py,sha256=3MUKRSAUJDB2tgco7qRUskW0zXGAWYvRRE4Q1_6xlLs,5404 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/WHEEL deleted file mode 100644 index 0b18a28..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3-1.26.8.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/__init__.py old mode 100755 new mode 100644 index fe86b59..c6fa382 --- 
a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/__init__.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/__init__.py @@ -19,6 +19,23 @@ from .util.timeout import Timeout from .util.url import get_host +# === NOTE TO REPACKAGERS AND VENDORS === +# Please delete this block, this logic is only +# for urllib3 being distributed via PyPI. +# See: https://github.com/urllib3/urllib3/issues/2680 +try: + import urllib3_secure_extra # type: ignore # noqa: F401 +except ImportError: + pass +else: + warnings.warn( + "'urllib3[secure]' extra is deprecated and will be removed " + "in a future release of urllib3 2.x. Read more in this issue: " + "https://github.com/urllib3/urllib3/issues/2680", + category=DeprecationWarning, + stacklevel=2, + ) + __author__ = "Andrey Petrov (andrey.petrov@shazow.net)" __license__ = "MIT" __version__ = __version__ diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_collections.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_collections.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_version.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_version.py old mode 100755 new mode 100644 index fa8979d..e12dd0e --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_version.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/_version.py @@ -1,2 +1,2 @@ # This file is protected via CODEOWNERS -__version__ = "1.26.8" +__version__ = "1.26.15" diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connection.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connection.py old mode 100755 new mode 100644 index 4d92ac6..54b96b1 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connection.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connection.py @@ -68,7 +68,7 @@ class BrokenPipeError(Exception): # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. -RECENT_DATE = datetime.date(2020, 7, 1) +RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") @@ -229,6 +229,11 @@ def putheader(self, header, *values): ) def request(self, method, url, body=None, headers=None): + # Update the inner socket's timeout value to send the request. + # This only triggers if the connection is re-used. + if getattr(self, "sock", None) is not None: + self.sock.settimeout(self.timeout) + if headers is None: headers = {} else: @@ -355,17 +360,15 @@ def set_cert( def connect(self): # Add certificate verification - conn = self._new_conn() + self.sock = conn = self._new_conn() hostname = self.host tls_in_tls = False if self._is_using_tunnel(): if self.tls_in_tls_required: - conn = self._connect_tls_proxy(hostname, conn) + self.sock = conn = self._connect_tls_proxy(hostname, conn) tls_in_tls = True - self.sock = conn - # Calls self._set_hostport(), so self.host is # self._tunnel_host below. 
self._tunnel() diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connectionpool.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connectionpool.py old mode 100755 new mode 100644 index 15bffcb..c23d736 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connectionpool.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/connectionpool.py @@ -379,7 +379,7 @@ def _make_request( timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() - conn.timeout = timeout_obj.connect_timeout + conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) # Trigger any extra validation we need to do. try: @@ -767,6 +767,8 @@ def _is_ssl_error_message_from_http_proxy(ssl_error): isinstance(e, BaseSSLError) and self.proxy and _is_ssl_error_message_from_http_proxy(e) + and conn.proxy + and conn.proxy.scheme == "https" ): e = ProxyError( "Your proxy appears to only use HTTP and not HTTPS, " @@ -860,7 +862,7 @@ def _is_ssl_error_message_from_http_proxy(ssl_error): ) # Check if we should retry the HTTP response. - has_retry_after = bool(response.getheader("Retry-After")) + has_retry_after = bool(response.headers.get("Retry-After")) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_appengine_environ.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_appengine_environ.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/bindings.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/bindings.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/low_level.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/_securetransport/low_level.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/appengine.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/appengine.py old mode 100755 new mode 100644 index f91bdd6..a5a6d91 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/appengine.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/appengine.py @@ -224,7 +224,7 @@ def urlopen( ) # Check if we should retry the HTTP response. 
- has_retry_after = bool(http_response.getheader("Retry-After")) + has_retry_after = bool(http_response.headers.get("Retry-After")) if retries.is_retry(method, http_response.status, has_retry_after): retries = retries.increment(method, url, response=http_response, _pool=self) log.debug("Retry: %s", url) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/ntlmpool.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/ntlmpool.py old mode 100755 new mode 100644 index 41a8fd1..4716657 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/ntlmpool.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/ntlmpool.py @@ -69,7 +69,7 @@ def _new_conn(self): log.debug("Request headers: %s", headers) conn.request("GET", self.authurl, None, headers) res = conn.getresponse() - reshdr = dict(res.getheaders()) + reshdr = dict(res.headers) log.debug("Response status: %s %s", res.status, res.reason) log.debug("Response headers: %s", reshdr) log.debug("Response data: %s [...]", res.read(100)) @@ -101,7 +101,7 @@ def _new_conn(self): conn.request("GET", self.authurl, None, headers) res = conn.getresponse() log.debug("Response status: %s %s", res.status, res.reason) - log.debug("Response headers: %s", dict(res.getheaders())) + log.debug("Response headers: %s", dict(res.headers)) log.debug("Response data: %s [...]", res.read()[:100]) if res.status != 200: if res.status == 401: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/pyopenssl.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/pyopenssl.py old mode 100755 new mode 100644 index def83af..1ed214b --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/pyopenssl.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/pyopenssl.py @@ -47,10 +47,10 @@ """ from __future__ import absolute_import +import OpenSSL.crypto import OpenSSL.SSL from cryptography import x509 from cryptography.hazmat.backends.openssl import backend as openssl_backend -from cryptography.hazmat.backends.openssl.x509 import _Certificate try: from cryptography.x509 import UnsupportedExtension @@ -73,11 +73,20 @@ class UnsupportedExtension(Exception): import logging import ssl import sys +import warnings from .. import util from ..packages import six from ..util.ssl_ import PROTOCOL_TLS_CLIENT +warnings.warn( + "'urllib3.contrib.pyopenssl' module is deprecated and will be removed " + "in a future release of urllib3 2.x. Read more in this issue: " + "https://github.com/urllib3/urllib3/issues/2680", + category=DeprecationWarning, + stacklevel=2, +) + __all__ = ["inject_into_urllib3", "extract_from_urllib3"] # SNI always works. @@ -219,9 +228,8 @@ def get_subj_alt_name(peer_cert): if hasattr(peer_cert, "to_cryptography"): cert = peer_cert.to_cryptography() else: - # This is technically using private APIs, but should work across all - # relevant versions before PyOpenSSL got a proper API for this. - cert = _Certificate(openssl_backend, peer_cert._x509) + der = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, peer_cert) + cert = x509.load_der_x509_certificate(der, openssl_backend) # We want to find the SAN extension. 
Ask Cryptography to locate it (it's # faster than looping in Python) @@ -406,7 +414,6 @@ def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) - else: # Platform-specific: Python 3 makefile = backport_makefile diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/securetransport.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/securetransport.py old mode 100755 new mode 100644 index 554c015..6c46a3b --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/securetransport.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/securetransport.py @@ -770,7 +770,6 @@ def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) - else: # Platform-specific: Python 3 def makefile(self, mode="r", buffering=None, *args, **kwargs): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/socks.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/contrib/socks.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/exceptions.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/exceptions.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/fields.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/fields.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/filepost.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/filepost.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/backports/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/backports/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/backports/makefile.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/backports/makefile.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/six.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/six.py old mode 100755 new mode 100644 index ba50acb..f099a3d --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/six.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/packages/six.py @@ -772,7 +772,6 @@ def reraise(tp, value, tb=None): value = None tb = None - else: def exec_(_code_, _globs_=None, _locs_=None): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/poolmanager.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/poolmanager.py old mode 100755 new mode 100644 index 3a31a28..ca4ec34 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/poolmanager.py +++ 
b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/poolmanager.py @@ -34,6 +34,7 @@ "ca_cert_dir", "ssl_context", "key_password", + "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/request.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/request.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/response.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/response.py old mode 100755 new mode 100644 index 38693f4..0bd13d4 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/response.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/response.py @@ -2,16 +2,22 @@ import io import logging +import sys +import warnings import zlib from contextlib import contextmanager from socket import error as SocketError from socket import timeout as SocketTimeout try: - import brotli + try: + import brotlicffi as brotli + except ImportError: + import brotli except ImportError: brotli = None +from . import util from ._collections import HTTPHeaderDict from .connection import BaseSSLError, HTTPException from .exceptions import ( @@ -478,6 +484,54 @@ def _error_catcher(self): if self._original_response and self._original_response.isclosed(): self.release_conn() + def _fp_read(self, amt): + """ + Read a response with the thought that reading the number of bytes + larger than can fit in a 32-bit int at a time via SSL in some + known cases leads to an overflow error that has to be prevented + if `amt` or `self.length_remaining` indicate that a problem may + happen. + + The known cases: + * 3.8 <= CPython < 3.9.7 because of a bug + https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. + * urllib3 injected with pyOpenSSL-backed SSL-support. + * CPython < 3.10 only when `amt` does not fit 32-bit int. + """ + assert self._fp + c_int_max = 2 ** 31 - 1 + if ( + ( + (amt and amt > c_int_max) + or (self.length_remaining and self.length_remaining > c_int_max) + ) + and not util.IS_SECURETRANSPORT + and (util.IS_PYOPENSSL or sys.version_info < (3, 10)) + ): + buffer = io.BytesIO() + # Besides `max_chunk_amt` being a maximum chunk size, it + # affects memory overhead of reading a response by this + # method in CPython. + # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum + # chunk size that does not lead to an overflow error, but + # 256 MiB is a compromise. + max_chunk_amt = 2 ** 28 + while amt is None or amt != 0: + if amt is not None: + chunk_amt = min(amt, max_chunk_amt) + amt -= chunk_amt + else: + chunk_amt = max_chunk_amt + data = self._fp.read(chunk_amt) + if not data: + break + buffer.write(data) + del data # to reduce peak memory usage by `max_chunk_amt`. 
+ return buffer.getvalue() + else: + # StringIO doesn't like amt=None + return self._fp.read(amt) if amt is not None else self._fp.read() + def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional @@ -510,13 +564,11 @@ def read(self, amt=None, decode_content=None, cache_content=False): fp_closed = getattr(self._fp, "closed", False) with self._error_catcher(): + data = self._fp_read(amt) if not fp_closed else b"" if amt is None: - # cStringIO doesn't like amt=None - data = self._fp.read() if not fp_closed else b"" flush_decoder = True else: cache_content = False - data = self._fp.read(amt) if not fp_closed else b"" if ( amt != 0 and not data ): # Platform-specific: Buggy versions of Python. @@ -612,9 +664,21 @@ def from_httplib(ResponseCls, r, **response_kw): # Backwards-compatibility methods for http.client.HTTPResponse def getheaders(self): + warnings.warn( + "HTTPResponse.getheaders() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.", + category=DeprecationWarning, + stacklevel=2, + ) return self.headers def getheader(self, name, default=None): + warnings.warn( + "HTTPResponse.getheader() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).", + category=DeprecationWarning, + stacklevel=2, + ) return self.headers.get(name, default) # Backwards compatibility for http.cookiejar diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/__init__.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/__init__.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/connection.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/connection.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/proxy.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/proxy.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/queue.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/queue.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/request.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/request.py old mode 100755 new mode 100644 index 2510338..b574b08 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/request.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/request.py @@ -14,7 +14,10 @@ ACCEPT_ENCODING = "gzip,deflate" try: - import brotli as _unused_module_brotli # noqa: F401 + try: + import brotlicffi as _unused_module_brotli # noqa: F401 + except ImportError: + import brotli as _unused_module_brotli # noqa: F401 except ImportError: pass else: diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/response.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/response.py old mode 100755 new mode 100644 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/retry.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/retry.py old mode 100755 new mode 100644 
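
The ``_fp_read()`` helper added to ``response.py`` above avoids the 32-bit ``SSL_read`` overflow by buffering reads in bounded chunks. The core loop, extracted as a rough standalone sketch (``read_in_chunks`` is a hypothetical name, not urllib3 API):

    import io

    def read_in_chunks(fp, amt=None, max_chunk_amt=2 ** 28):
        # Never request more than 256 MiB from the file object at once,
        # staying well under the 2 GiB - 1 ceiling of a C int.
        buffer = io.BytesIO()
        while amt is None or amt != 0:
            if amt is not None:
                chunk_amt = min(amt, max_chunk_amt)
                amt -= chunk_amt
            else:
                chunk_amt = max_chunk_amt
            data = fp.read(chunk_amt)
            if not data:
                break
            buffer.write(data)
        return buffer.getvalue()
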
index 3398323..2490d5e
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/retry.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/retry.py
@@ -394,7 +394,7 @@ def parse_retry_after(self, retry_after):
     def get_retry_after(self, response):
         """Get the value of Retry-After in seconds."""
 
-        retry_after = response.getheader("Retry-After")
+        retry_after = response.headers.get("Retry-After")
 
         if retry_after is None:
             return None
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_match_hostname.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_match_hostname.py
old mode 100755
new mode 100644
index a4b4a56..1dd950c
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_match_hostname.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssl_match_hostname.py
@@ -112,11 +112,9 @@ def match_hostname(cert, hostname):
     try:
         # Divergence from upstream: ipaddress can't handle byte str
         host_ip = ipaddress.ip_address(_to_unicode(hostname))
-    except ValueError:
-        # Not an IP address (common case)
-        host_ip = None
-    except UnicodeError:
-        # Divergence from upstream: Have to deal with ipaddress not taking
+    except (UnicodeError, ValueError):
+        # ValueError: Not an IP address (common case)
+        # UnicodeError: Divergence from upstream: Have to deal with ipaddress not taking
         # byte strings. addresses should be all ascii, so we consider it not
         # an ipaddress in this case
         host_ip = None
@@ -124,7 +122,7 @@
     # Divergence from upstream: Make ipaddress library optional
     if ipaddress is None:
         host_ip = None
-    else:
+    else:  # Defensive
         raise
 
     dnsnames = []
     san = cert.get("subjectAltName", ())
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssltransport.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/ssltransport.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/timeout.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/timeout.py
old mode 100755
new mode 100644
index ff69593..78e18a6
--- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/timeout.py
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/timeout.py
@@ -2,9 +2,9 @@
 
 import time
 
-# The default socket timeout, used by httplib to indicate that no timeout was
-# specified by the user
-from socket import _GLOBAL_DEFAULT_TIMEOUT
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT, getdefaulttimeout
 
 from ..exceptions import TimeoutStateError
 
@@ -116,6 +116,10 @@ def __repr__(self):
     # __str__ provided for backwards compatibility
     __str__ = __repr__
 
+    @classmethod
+    def resolve_default_timeout(cls, timeout):
+        return getdefaulttimeout() if timeout is cls.DEFAULT_TIMEOUT else timeout
+
     @classmethod
     def _validate_timeout(cls, value, name):
         """Check that a timeout attribute is valid.
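
``Timeout.resolve_default_timeout()`` above exists so that ``connectionpool.py`` can hand a reused connection a concrete value instead of the ``_GLOBAL_DEFAULT_TIMEOUT`` sentinel, which ``sock.settimeout()`` would reject. Roughly, under the default settings:

    import socket
    from urllib3.util.timeout import Timeout

    t = Timeout()  # connect timeout defaults to the sentinel object
    resolved = Timeout.resolve_default_timeout(t.connect_timeout)
    # The sentinel is replaced by socket.getdefaulttimeout() (usually None);
    # any explicit numeric timeout passes through unchanged.
    assert resolved == socket.getdefaulttimeout()
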
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/url.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/url.py old mode 100755 new mode 100644 index 81a03da..e5682d3 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/url.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/url.py @@ -50,7 +50,7 @@ "(?:(?:%(hex)s:){0,6}%(hex)s)?::", ] -UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._\-~" IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" @@ -63,7 +63,7 @@ BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$") ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$") -_HOST_PORT_PAT = ("^(%s|%s|%s)(?::([0-9]{0,5}))?$") % ( +_HOST_PORT_PAT = ("^(%s|%s|%s)(?::0*?(|0|[1-9][0-9]{0,4}))?$") % ( REG_NAME_PAT, IPV4_PAT, IPV6_ADDRZ_PAT, @@ -279,6 +279,9 @@ def _normalize_host(host, scheme): if scheme in NORMALIZABLE_SCHEMES: is_ipv6 = IPV6_ADDRZ_RE.match(host) if is_ipv6: + # IPv6 hosts of the form 'a::b%zone' are encoded in a URL as + # such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID + # separator as necessary to return a valid RFC 4007 scoped IP. match = ZONE_ID_RE.search(host) if match: start, end = match.span(1) @@ -300,7 +303,7 @@ def _normalize_host(host, scheme): def _idna_encode(name): - if name and any([ord(x) > 128 for x in name]): + if name and any(ord(x) >= 128 for x in name): try: import idna except ImportError: @@ -331,7 +334,7 @@ def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. - This parser is RFC 3986 compliant. + This parser is RFC 3986 and RFC 6874 compliant. The parser logic and helper functions are based heavily on work done in the ``rfc3986`` module. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/wait.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/wait.py old mode 100755 new mode 100644 index c280646..21b4590 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/wait.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/urllib3/util/wait.py @@ -42,7 +42,6 @@ class NoWayToWaitForSocketError(Exception): def _retry_on_intr(fn, timeout): return fn(timeout) - else: # Old and broken Pythons. 
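
The ``url.py`` changes above tighten host/port handling: port 0 and leading zeroes now parse per the 1.26.14 and 1.26.13 changelog entries, and the RFC 6874 ``%25`` zone-ID separator is unquoted during host normalization. Expected behavior, as a sketch against 1.26.15:

    from urllib3.util.url import parse_url

    assert parse_url("http://example.com:0/").port == 0      # no longer None
    assert parse_url("http://example.com:0080/").port == 80  # zeroes stripped
    print(parse_url("http://[fe80::1%25eth0]/").host)        # '[fe80::1%eth0]'
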
def _retry_on_intr(fn, timeout): diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/INSTALLER similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/splunktaucclib-5.0.7.dist-info/INSTALLER rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/INSTALLER diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/LICENSE similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools-60.7.1.dist-info/LICENSE rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/LICENSE diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/METADATA new file mode 100644 index 0000000..0a5976b --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/METADATA @@ -0,0 +1,106 @@ +Metadata-Version: 2.1 +Name: zipp +Version: 3.15.0 +Summary: Backport of pathlib-compatible object wrapper for zip files +Home-page: https://github.com/jaraco/zipp +Author: Jason R. Coombs +Author-email: jaraco@jaraco.com +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Requires-Python: >=3.7 +License-File: LICENSE +Provides-Extra: docs +Requires-Dist: sphinx (>=3.5) ; extra == 'docs' +Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs' +Requires-Dist: rst.linker (>=1.9) ; extra == 'docs' +Requires-Dist: furo ; extra == 'docs' +Requires-Dist: sphinx-lint ; extra == 'docs' +Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs' +Provides-Extra: testing +Requires-Dist: pytest (>=6) ; extra == 'testing' +Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing' +Requires-Dist: flake8 (<5) ; extra == 'testing' +Requires-Dist: pytest-cov ; extra == 'testing' +Requires-Dist: pytest-enabler (>=1.3) ; extra == 'testing' +Requires-Dist: jaraco.itertools ; extra == 'testing' +Requires-Dist: jaraco.functools ; extra == 'testing' +Requires-Dist: more-itertools ; extra == 'testing' +Requires-Dist: big-O ; extra == 'testing' +Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: pytest-flake8 ; (python_version < "3.12") and extra == 'testing' + +.. image:: https://img.shields.io/pypi/v/zipp.svg + :target: https://pypi.org/project/zipp + +.. image:: https://img.shields.io/pypi/pyversions/zipp.svg + +.. image:: https://github.com/jaraco/zipp/workflows/tests/badge.svg + :target: https://github.com/jaraco/zipp/actions?query=workflow%3A%22tests%22 + :alt: tests + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + :alt: Code style: Black + +.. .. image:: https://readthedocs.org/projects/skeleton/badge/?version=latest +.. 
:target: https://skeleton.readthedocs.io/en/latest/?badge=latest + +.. image:: https://img.shields.io/badge/skeleton-2023-informational + :target: https://blog.jaraco.com/skeleton + +.. image:: https://tidelift.com/badges/package/pypi/zipp + :target: https://tidelift.com/subscription/pkg/pypi-zipp?utm_source=pypi-zipp&utm_medium=readme + + +A pathlib-compatible Zipfile object wrapper. Official backport of the standard library +`Path object `_. + + +Compatibility +============= + +New features are introduced in this third-party library and later merged +into CPython. The following table indicates which versions of this library +were contributed to different versions in the standard library: + +.. list-table:: + :header-rows: 1 + + * - zipp + - stdlib + * - 3.9 + - 3.12 + * - 3.5 + - 3.11 + * - 3.2 + - 3.10 + * - 3.3 ?? + - 3.9 + * - 1.0 + - 3.8 + + +Usage +===== + +Use ``zipp.Path`` in place of ``zipfile.Path`` on any Python. + +For Enterprise +============== + +Available as part of the Tidelift Subscription. + +This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. + +`Learn more `_. + +Security Contact +================ + +To report a security vulnerability, please use the +`Tidelift security contact `_. +Tidelift will coordinate the fix and disclosure. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/RECORD new file mode 100644 index 0000000..946c832 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/RECORD @@ -0,0 +1,8 @@ +zipp-3.15.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +zipp-3.15.0.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050 +zipp-3.15.0.dist-info/METADATA,sha256=el77dlVTqXoMRoTxqwIdoMNmNAkERCn9v9-XlKjJfAU,3735 +zipp-3.15.0.dist-info/RECORD,, +zipp-3.15.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +zipp-3.15.0.dist-info/top_level.txt,sha256=iAbdoSHfaGqBfVb2XuR9JqSQHCoOsOtG6y9C_LSpqFw,5 +zipp/__init__.py,sha256=ZNjHuOKaFmgrSPvtIlHYFNCvENHqSYuqEVktBgj9cu8,10676 +zipp/py310compat.py,sha256=HQG4If-eI5v4RT82Uagk7PuLsbnJ7HKgj0aQKGRAp8M,309 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/WHEEL new file mode 100644 index 0000000..57e3d84 --- /dev/null +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/top_level.txt b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/top_level.txt similarity index 100% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/top_level.txt rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.15.0.dist-info/top_level.txt diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/INSTALLER b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/INSTALLER deleted file mode 100644 index 
a1b589e..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/LICENSE b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/LICENSE deleted file mode 100644 index 353924b..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright Jason R. Coombs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/METADATA b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/METADATA deleted file mode 100644 index b1308b5..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/METADATA +++ /dev/null @@ -1,58 +0,0 @@ -Metadata-Version: 2.1 -Name: zipp -Version: 3.7.0 -Summary: Backport of pathlib-compatible object wrapper for zip files -Home-page: https://github.com/jaraco/zipp -Author: Jason R. Coombs -Author-email: jaraco@jaraco.com -License: UNKNOWN -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Requires-Python: >=3.7 -License-File: LICENSE -Provides-Extra: docs -Requires-Dist: sphinx ; extra == 'docs' -Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs' -Requires-Dist: rst.linker (>=1.9) ; extra == 'docs' -Provides-Extra: testing -Requires-Dist: pytest (>=6) ; extra == 'testing' -Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing' -Requires-Dist: pytest-flake8 ; extra == 'testing' -Requires-Dist: pytest-cov ; extra == 'testing' -Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing' -Requires-Dist: jaraco.itertools ; extra == 'testing' -Requires-Dist: func-timeout ; extra == 'testing' -Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing' -Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing' - -.. image:: https://img.shields.io/pypi/v/zipp.svg - :target: `PyPI link`_ - -.. image:: https://img.shields.io/pypi/pyversions/zipp.svg - :target: `PyPI link`_ - -.. _PyPI link: https://pypi.org/project/zipp - -.. 
image:: https://github.com/jaraco/zipp/workflows/tests/badge.svg - :target: https://github.com/jaraco/zipp/actions?query=workflow%3A%22tests%22 - :alt: tests - -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: Code style: Black - -.. .. image:: https://readthedocs.org/projects/skeleton/badge/?version=latest -.. :target: https://skeleton.readthedocs.io/en/latest/?badge=latest - -.. image:: https://img.shields.io/badge/skeleton-2021-informational - :target: https://blog.jaraco.com/skeleton - - -A pathlib-compatible Zipfile object wrapper. Official backport of the standard library -`Path object `_. - - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/RECORD b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/RECORD deleted file mode 100644 index 1b0af29..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/RECORD +++ /dev/null @@ -1,7 +0,0 @@ -zipp-3.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -zipp-3.7.0.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050 -zipp-3.7.0.dist-info/METADATA,sha256=ZLzgaXTyZX_MxTU0lcGfhdPY4CjFrT_3vyQ2Fo49pl8,2261 -zipp-3.7.0.dist-info/RECORD,, -zipp-3.7.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 -zipp-3.7.0.dist-info/top_level.txt,sha256=iAbdoSHfaGqBfVb2XuR9JqSQHCoOsOtG6y9C_LSpqFw,5 -zipp.py,sha256=ajztOH-9I7KA_4wqDYygtHa6xUBVZgFpmZ8FE74HHHI,8425 diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/WHEEL b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/WHEEL deleted file mode 100644 index becc9a6..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp-3.7.0.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp.py deleted file mode 100755 index 26b723c..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp.py +++ /dev/null @@ -1,329 +0,0 @@ -import io -import posixpath -import zipfile -import itertools -import contextlib -import sys -import pathlib - -if sys.version_info < (3, 7): - from collections import OrderedDict -else: - OrderedDict = dict - - -__all__ = ['Path'] - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. 
- - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - """ - path = path.rstrip(posixpath.sep) - while path and path != posixpath.sep: - yield path - path, tail = posixpath.split(path) - - -_dedupe = OrderedDict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class CompleteDirs(zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. - """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super(CompleteDirs, self).namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. - """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(_pathlib_compat(source)) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. - """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super(FastLookup, self).namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super(FastLookup, self)._name_set() - return self.__lookup - - -def _pathlib_compat(path): - """ - For path-like objects, convert to a filename for compatibility - on Python 3.6.1 and earlier. - """ - try: - return path.__fspath__() - except AttributeError: - return str(path) - - -class Path: - """ - A pathlib-compatible interface for zip files. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = zipfile.ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> root = Path(zf) - - From there, several path operations are available. 
- - Directory iteration (including the zip file itself): - - >>> a, b = root.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text() - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. Note these attributes are not - valid and will raise a ``ValueError`` if the zipfile - has no filename. - - >>> root.name - 'abcde.zip' - >>> str(root.filename).replace(os.sep, posixpath.sep) - 'mem/abcde.zip' - >>> str(root.parent) - 'mem' - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). - """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if not self.exists() and zip_mode == 'r': - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - return io.TextIOWrapper(stream, *args, **kwargs) - - @property - def name(self): - return pathlib.Path(self.at).name or self.filename.name - - @property - def suffix(self): - return pathlib.Path(self.at).suffix or self.filename.suffix - - @property - def suffixes(self): - return pathlib.Path(self.at).suffixes or self.filename.suffixes - - @property - def stem(self): - return pathlib.Path(self.at).stem or self.filename.stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - with self.open('r', *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *map(_pathlib_compat, other)) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return 
self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/zipp.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp/__init__.py old mode 100755 new mode 100644 similarity index 71% rename from Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/zipp.py rename to Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp/__init__.py index 26b723c..ddfa0a6 --- a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pkg_resources/_vendor/zipp.py +++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp/__init__.py @@ -3,13 +3,11 @@ import zipfile import itertools import contextlib -import sys import pathlib +import re +import fnmatch -if sys.version_info < (3, 7): - from collections import OrderedDict -else: - OrderedDict = dict +from .py310compat import text_encoding __all__ = ['Path'] @@ -56,7 +54,7 @@ def _ancestry(path): path, tail = posixpath.split(path) -_dedupe = OrderedDict.fromkeys +_dedupe = dict.fromkeys """Deduplicate an iterable in original order""" @@ -68,10 +66,33 @@ def _difference(minuend, subtrahend): return itertools.filterfalse(set(subtrahend).__contains__, minuend) -class CompleteDirs(zipfile.ZipFile): +class InitializedState: + """ + Mix-in to save the initialization state for pickling. + """ + + def __init__(self, *args, **kwargs): + self.__args = args + self.__kwargs = kwargs + super().__init__(*args, **kwargs) + + def __getstate__(self): + return self.__args, self.__kwargs + + def __setstate__(self, state): + args, kwargs = state + super().__init__(*args, **kwargs) + + +class CompleteDirs(InitializedState, zipfile.ZipFile): """ A ZipFile subclass that ensures that implied directories are always included in the namelist. + + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt'])) + ['foo/', 'foo/bar/'] + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt', 'foo/bar/'])) + ['foo/'] """ @staticmethod @@ -81,7 +102,7 @@ def _implied_dirs(names): return _dedupe(_difference(as_dirs, names)) def namelist(self): - names = super(CompleteDirs, self).namelist() + names = super().namelist() return names + list(self._implied_dirs(names)) def _name_set(self): @@ -97,6 +118,17 @@ def resolve_dir(self, name): dir_match = name not in names and dirname in names return dirname if dir_match else name + def getinfo(self, name): + """ + Supplement getinfo for implied dirs. 
+ """ + try: + return super().getinfo(name) + except KeyError: + if not name.endswith('/') or name not in self._name_set(): + raise + return zipfile.ZipInfo(filename=name) + @classmethod def make(cls, source): """ @@ -107,7 +139,7 @@ def make(cls, source): return source if not isinstance(source, zipfile.ZipFile): - return cls(_pathlib_compat(source)) + return cls(source) # Only allow for FastLookup when supplied zipfile is read-only if 'r' not in source.mode: @@ -126,25 +158,19 @@ class FastLookup(CompleteDirs): def namelist(self): with contextlib.suppress(AttributeError): return self.__names - self.__names = super(FastLookup, self).namelist() + self.__names = super().namelist() return self.__names def _name_set(self): with contextlib.suppress(AttributeError): return self.__lookup - self.__lookup = super(FastLookup, self)._name_set() + self.__lookup = super()._name_set() return self.__lookup -def _pathlib_compat(path): - """ - For path-like objects, convert to a filename for compatibility - on Python 3.6.1 and earlier. - """ - try: - return path.__fspath__() - except AttributeError: - return str(path) +def _extract_text_encoding(encoding=None, *args, **kwargs): + # stacklevel=3 so that the caller of the caller see any warning. + return text_encoding(encoding, 3), args, kwargs class Path: @@ -196,7 +222,7 @@ class Path: Read text: - >>> c.read_text() + >>> c.read_text(encoding='utf-8') 'content of c' existence: @@ -240,6 +266,18 @@ def __init__(self, root, at=""): self.root = FastLookup.make(root) self.at = at + def __eq__(self, other): + """ + >>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo' + False + """ + if self.__class__ is not other.__class__: + return NotImplemented + return (self.root, self.at) == (other.root, other.at) + + def __hash__(self): + return hash((self.root, self.at)) + def open(self, mode='r', *args, pwd=None, **kwargs): """ Open this entry as text or binary following the semantics @@ -256,7 +294,9 @@ def open(self, mode='r', *args, pwd=None, **kwargs): if args or kwargs: raise ValueError("encoding args invalid for binary operation") return stream - return io.TextIOWrapper(stream, *args, **kwargs) + # Text mode: + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + return io.TextIOWrapper(stream, encoding, *args, **kwargs) @property def name(self): @@ -279,7 +319,8 @@ def filename(self): return pathlib.Path(self.root.filename).joinpath(self.at) def read_text(self, *args, **kwargs): - with self.open('r', *args, **kwargs) as strm: + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + with self.open('r', encoding, *args, **kwargs) as strm: return strm.read() def read_bytes(self): @@ -307,6 +348,38 @@ def iterdir(self): subs = map(self._next, self.root.namelist()) return filter(self._is_child, subs) + def match(self, path_pattern): + return pathlib.Path(self.at).match(path_pattern) + + def is_symlink(self): + """ + Return whether this path is a symlink. Always false (python/cpython#82102). 
+        """
+        return False
+
+    def _descendants(self):
+        for child in self.iterdir():
+            yield child
+            if child.is_dir():
+                yield from child._descendants()
+
+    def glob(self, pattern):
+        if not pattern:
+            raise ValueError(f"Unacceptable pattern: {pattern!r}")
+
+        matches = re.compile(fnmatch.translate(pattern)).fullmatch
+        return (
+            child
+            for child in self._descendants()
+            if matches(str(child.relative_to(self)))
+        )
+
+    def rglob(self, pattern):
+        return self.glob(f'**/{pattern}')
+
+    def relative_to(self, other, *extra):
+        return posixpath.relpath(str(self), str(other.joinpath(*extra)))
+
     def __str__(self):
         return posixpath.join(self.root.filename, self.at)

@@ -314,7 +387,7 @@ def __repr__(self):
         return self.__repr.format(self=self)

     def joinpath(self, *other):
-        next = posixpath.join(self.at, *map(_pathlib_compat, other))
+        next = posixpath.join(self.at, *other)
         return self._next(self.root.resolve_dir(next))

     __truediv__ = joinpath
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp/py310compat.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp/py310compat.py
new file mode 100644
index 0000000..8244124
--- /dev/null
+++ b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/zipp/py310compat.py
@@ -0,0 +1,12 @@
+import sys
+import io
+
+
+te_impl = 'lambda encoding, stacklevel=2, /: encoding'
+te_impl_37 = te_impl.replace(', /', '')
+_text_encoding = eval(te_impl) if sys.version_info > (3, 8) else eval(te_impl_37)
+
+
+text_encoding = (
+    io.text_encoding if sys.version_info > (3, 10) else _text_encoding  # type: ignore
+)
diff --git a/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter_declare.py b/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter_declare.py
old mode 100755
new mode 100644
diff --git a/Splunk-TA-cisco-dnacenter/default/addon_builder.conf b/Splunk-TA-cisco-dnacenter/default/addon_builder.conf
index 8af9685..ae336ae 100644
--- a/Splunk-TA-cisco-dnacenter/default/addon_builder.conf
+++ b/Splunk-TA-cisco-dnacenter/default/addon_builder.conf
@@ -1,7 +1,6 @@
 # this file is generated by add-on builder automatically
 # please do not edit it
 [base]
-builder_version = 4.1.0
+builder_version = 4.1.3
 builder_build = 0
-is_edited = 1
-
+is_edited = 1
\ No newline at end of file
diff --git a/Splunk-TA-cisco-dnacenter/default/splunk_ta_cisco_dnacenter_settings.conf b/Splunk-TA-cisco-dnacenter/default/splunk_ta_cisco_dnacenter_settings.conf
index 7cef69e..2fb99b7 100644
--- a/Splunk-TA-cisco-dnacenter/default/splunk_ta_cisco_dnacenter_settings.conf
+++ b/Splunk-TA-cisco-dnacenter/default/splunk_ta_cisco_dnacenter_settings.conf
@@ -1,2 +1,3 @@
 [logging]
+loglevel = INFO
diff --git a/Splunk-TA-cisco-dnacenter/static/appIcon.png b/Splunk-TA-cisco-dnacenter/static/appIcon.png
index 1138d2f0f7d3c99cbc099492053a868bcce989c5..613f75858fc68861da44aa3b6d9b01d6e3968207 100644
Binary files a/Splunk-TA-cisco-dnacenter/static/appIcon.png and b/Splunk-TA-cisco-dnacenter/static/appIcon.png differ
diff --git a/Splunk-TA-cisco-dnacenter/static/appIconAlt.png b/Splunk-TA-cisco-dnacenter/static/appIconAlt.png
index 1138d2f0f7d3c99cbc099492053a868bcce989c5..613f75858fc68861da44aa3b6d9b01d6e3968207 100644
Binary files a/Splunk-TA-cisco-dnacenter/static/appIconAlt.png and b/Splunk-TA-cisco-dnacenter/static/appIconAlt.png differ
diff --git a/Splunk-TA-cisco-dnacenter/static/appIconAlt_2x.png b/Splunk-TA-cisco-dnacenter/static/appIconAlt_2x.png
index 7e1d4b7c8463179c0cc77b083b89dc14225ac7fb..0e78f8153fc95a8c397854d1e42d2674e78c97c6 100644
Binary files a/Splunk-TA-cisco-dnacenter/static/appIconAlt_2x.png and b/Splunk-TA-cisco-dnacenter/static/appIconAlt_2x.png differ
diff --git a/Splunk-TA-cisco-dnacenter/static/appIcon_2x.png b/Splunk-TA-cisco-dnacenter/static/appIcon_2x.png
index 7e1d4b7c8463179c0cc77b083b89dc14225ac7fb..0e78f8153fc95a8c397854d1e42d2674e78c97c6 100644
Binary files a/Splunk-TA-cisco-dnacenter/static/appIcon_2x.png and b/Splunk-TA-cisco-dnacenter/static/appIcon_2x.png differ

From 5d6eba92f497d99659e3ef9f661f283298be68ff Mon Sep 17 00:00:00 2001
From: William Astorga
Date: Thu, 12 Oct 2023 23:55:57 -0600
Subject: [PATCH 2/6] fix: update builder_version from 4.1.0 to 4.1.3, part 2

---
 Splunk-TA-cisco-dnacenter/README.txt                 | 11 +----------
 Splunk-TA-cisco-dnacenter/default/addon_builder.conf |  3 ++-
 Splunk-TA-cisco-dnacenter/default/app.conf           |  2 +-
 Splunk-TA-cisco-dnacenter/default/props.conf         |  9 +++++++++
 4 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/Splunk-TA-cisco-dnacenter/README.txt b/Splunk-TA-cisco-dnacenter/README.txt
index 3e8ea2b..529594b 100644
--- a/Splunk-TA-cisco-dnacenter/README.txt
+++ b/Splunk-TA-cisco-dnacenter/README.txt
@@ -1,12 +1,3 @@
 This is an add-on powered by the Splunk Add-on Builder.
# Binary File Declaration -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/pvectorc.cpython-37m-x86_64-linux-gnu.so: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/_speedups.cpython-37m-x86_64-linux-gnu.so: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-32.exe: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli.exe: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-arm64.exe: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-32.exe: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui.exe: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/gui-64.exe: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-arm64.exe: this file does not require any source code -/opt/splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/setuptools/cli-64.exe: this file does not require any source code +/Applications/Splunk/var/data/tabuilder/package/Splunk-TA-cisco-dnacenter/bin/splunk_ta_cisco_dnacenter/aob_py3/markupsafe/_speedups.cpython-37m-x86_64-linux-gnu.so: this file does not require any source code diff --git a/Splunk-TA-cisco-dnacenter/default/addon_builder.conf b/Splunk-TA-cisco-dnacenter/default/addon_builder.conf index ae336ae..fb73604 100644 --- a/Splunk-TA-cisco-dnacenter/default/addon_builder.conf +++ b/Splunk-TA-cisco-dnacenter/default/addon_builder.conf @@ -3,4 +3,5 @@ [base] builder_version = 4.1.3 builder_build = 0 -is_edited = 1 \ No newline at end of file +is_edited = 1 + diff --git a/Splunk-TA-cisco-dnacenter/default/app.conf b/Splunk-TA-cisco-dnacenter/default/app.conf index 6423dba..59309d3 100644 --- a/Splunk-TA-cisco-dnacenter/default/app.conf +++ b/Splunk-TA-cisco-dnacenter/default/app.conf @@ -7,7 +7,7 @@ build = 1 [launcher] author = Cisco Systems -version = 1.0.0 +version = 1.0.4 description = Cisco DNA Center Add-on for Splunk [ui] diff --git a/Splunk-TA-cisco-dnacenter/default/props.conf b/Splunk-TA-cisco-dnacenter/default/props.conf index e807861..1152ff8 100644 --- a/Splunk-TA-cisco-dnacenter/default/props.conf +++ b/Splunk-TA-cisco-dnacenter/default/props.conf @@ -7,53 +7,62 @@ [cisco:dnac:clienthealth] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:compliance] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:devicehealth] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:fabricdevice] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on 
Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:fabricsite] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:image] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:networkhealth] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:issue] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none [cisco:dnac:securityadvisory] INDEXED_EXTRACTIONS = json SHOULD_LINEMERGE = 0 +category = Splunk App Add-on Builder pulldown_type = 1 KV_MODE = none From 0eb7e2b2bc514eabe1fecab5a67b82da67d5a83a Mon Sep 17 00:00:00 2001 From: William Astorga Date: Thu, 12 Oct 2023 23:57:28 -0600 Subject: [PATCH 3/6] fix: remove unneeded/unused input files --- ...co_dnacenter_rh_cisco_dnac_fabricdevice.py | 75 --- ...isco_dnacenter_rh_cisco_dnac_fabricsite.py | 75 --- ..._TA_cisco_dnacenter_rh_cisco_dnac_image.py | 75 --- ...co_dnacenter_rh_cisco_dnac_sensordevice.py | 75 --- .../bin/cisco_dnac_fabricdevice.py | 95 ---- .../bin/cisco_dnac_fabricsite.py | 95 ---- .../bin/cisco_dnac_image.py | 95 ---- .../bin/cisco_dnac_sensordevice.py | 95 ---- .../input_module_cisco_dnac_fabricdevice.py | 238 -------- .../bin/input_module_cisco_dnac_fabricsite.py | 234 -------- .../bin/input_module_cisco_dnac_image.py | 523 ------------------ .../input_module_cisco_dnac_sensordevice.py | 135 ----- 12 files changed, 1810 deletions(-) delete mode 100755 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricdevice.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricsite.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_image.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_sensordevice.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricdevice.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricsite.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_image.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/cisco_dnac_sensordevice.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricdevice.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricsite.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_image.py delete mode 100755 Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_sensordevice.py diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricdevice.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricdevice.py deleted file mode 100755 index ff83b08..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricdevice.py +++ /dev/null @@ -1,75 +0,0 @@ - -import splunk_ta_cisco_dnacenter_declare - -from splunktaucclib.rest_handler.endpoint import ( - field, - validator, - RestModel, - DataInputModel, -) -from splunktaucclib.rest_handler import admin_external, util -from splunk_aoblib.rest_migration import ConfigMigrationHandler - -util.remove_http_proxy_env_vars() - - -fields = [ - field.RestField( - 'interval', - required=True, - encrypted=False, - default=None, - validator=validator.Pattern( - regex=r"""^\-[1-9]\d*$|^\d*$""", - ) - ), - field.RestField( - 'index', 
- required=True, - encrypted=False, - default='default', - validator=validator.String( - min_len=1, - max_len=80, - ) - ), - field.RestField( - 'cisco_dna_center_host', - required=True, - encrypted=False, - default=None, - validator=validator.String( - min_len=0, - max_len=8192, - ) - ), - field.RestField( - 'cisco_dna_center_account', - required=True, - encrypted=False, - default=None, - validator=None - ), - - field.RestField( - 'disabled', - required=False, - validator=None - ) - -] -model = RestModel(fields, name=None) - - - -endpoint = DataInputModel( - 'cisco_dnac_fabricdevice', - model, -) - - -if __name__ == '__main__': - admin_external.handle( - endpoint, - handler=ConfigMigrationHandler, - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricsite.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricsite.py deleted file mode 100755 index b7d4346..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_fabricsite.py +++ /dev/null @@ -1,75 +0,0 @@ - -import splunk_ta_cisco_dnacenter_declare - -from splunktaucclib.rest_handler.endpoint import ( - field, - validator, - RestModel, - DataInputModel, -) -from splunktaucclib.rest_handler import admin_external, util -from splunk_aoblib.rest_migration import ConfigMigrationHandler - -util.remove_http_proxy_env_vars() - - -fields = [ - field.RestField( - 'interval', - required=True, - encrypted=False, - default=None, - validator=validator.Pattern( - regex=r"""^\-[1-9]\d*$|^\d*$""", - ) - ), - field.RestField( - 'index', - required=True, - encrypted=False, - default='default', - validator=validator.String( - min_len=1, - max_len=80, - ) - ), - field.RestField( - 'cisco_dna_center_host', - required=True, - encrypted=False, - default=None, - validator=validator.String( - min_len=0, - max_len=8192, - ) - ), - field.RestField( - 'cisco_dna_center_account', - required=True, - encrypted=False, - default=None, - validator=None - ), - - field.RestField( - 'disabled', - required=False, - validator=None - ) - -] -model = RestModel(fields, name=None) - - - -endpoint = DataInputModel( - 'cisco_dnac_fabricsite', - model, -) - - -if __name__ == '__main__': - admin_external.handle( - endpoint, - handler=ConfigMigrationHandler, - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_image.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_image.py deleted file mode 100755 index ffa9081..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_image.py +++ /dev/null @@ -1,75 +0,0 @@ - -import splunk_ta_cisco_dnacenter_declare - -from splunktaucclib.rest_handler.endpoint import ( - field, - validator, - RestModel, - DataInputModel, -) -from splunktaucclib.rest_handler import admin_external, util -from splunk_aoblib.rest_migration import ConfigMigrationHandler - -util.remove_http_proxy_env_vars() - - -fields = [ - field.RestField( - 'interval', - required=True, - encrypted=False, - default=None, - validator=validator.Pattern( - regex=r"""^\-[1-9]\d*$|^\d*$""", - ) - ), - field.RestField( - 'index', - required=True, - encrypted=False, - default='default', - validator=validator.String( - min_len=1, - max_len=80, - ) - ), - field.RestField( - 'cisco_dna_center_host', - required=True, - encrypted=False, - default=None, - validator=validator.String( - min_len=0, - max_len=8192, - ) - ), - field.RestField( - 'cisco_dna_center_account', - required=True, - encrypted=False, - default=None, - 
validator=None - ), - - field.RestField( - 'disabled', - required=False, - validator=None - ) - -] -model = RestModel(fields, name=None) - - - -endpoint = DataInputModel( - 'cisco_dnac_image', - model, -) - - -if __name__ == '__main__': - admin_external.handle( - endpoint, - handler=ConfigMigrationHandler, - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_sensordevice.py b/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_sensordevice.py deleted file mode 100755 index a708949..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/Splunk_TA_cisco_dnacenter_rh_cisco_dnac_sensordevice.py +++ /dev/null @@ -1,75 +0,0 @@ - -import splunk_ta_cisco_dnacenter_declare - -from splunktaucclib.rest_handler.endpoint import ( - field, - validator, - RestModel, - DataInputModel, -) -from splunktaucclib.rest_handler import admin_external, util -from splunk_aoblib.rest_migration import ConfigMigrationHandler - -util.remove_http_proxy_env_vars() - - -fields = [ - field.RestField( - 'interval', - required=True, - encrypted=False, - default=None, - validator=validator.Pattern( - regex=r"""^\-[1-9]\d*$|^\d*$""", - ) - ), - field.RestField( - 'index', - required=True, - encrypted=False, - default='default', - validator=validator.String( - min_len=1, - max_len=80, - ) - ), - field.RestField( - 'cisco_dna_center_host', - required=True, - encrypted=False, - default=None, - validator=validator.String( - min_len=0, - max_len=8192, - ) - ), - field.RestField( - 'cisco_dna_center_account', - required=True, - encrypted=False, - default=None, - validator=None - ), - - field.RestField( - 'disabled', - required=False, - validator=None - ) - -] -model = RestModel(fields, name=None) - - - -endpoint = DataInputModel( - 'cisco_dnac_sensordevice', - model, -) - - -if __name__ == '__main__': - admin_external.handle( - endpoint, - handler=ConfigMigrationHandler, - ) diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricdevice.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricdevice.py deleted file mode 100755 index f14c6e1..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricdevice.py +++ /dev/null @@ -1,95 +0,0 @@ -import splunk_ta_cisco_dnacenter_declare - -import os -import sys -import time -import datetime -import json - -import modinput_wrapper.base_modinput -from splunklib import modularinput as smi - - - -import input_module_cisco_dnac_fabricdevice as input_module - -bin_dir = os.path.basename(__file__) - -''' - Do not edit this file!!! - This file is generated by Add-on builder automatically. 
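# A minimal, standalone sketch (hypothetical names, not part of the add-on) of
# the feature-detection pattern the generated __init__ below relies on: probe
# the user-editable input_module for an optional use_single_instance_mode()
# hook and fall back to multi-instance mode when it is absent.
def resolve_single_instance(input_module, default=False):
    hook = getattr(input_module, "use_single_instance_mode", None)
    # Only call the hook when the module actually defines something callable.
    return hook() if callable(hook) else default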
- Add your modular input logic to file input_module_cisco_dnac_fabricdevice.py -''' -class ModInputcisco_dnac_fabricdevice(modinput_wrapper.base_modinput.BaseModInput): - - def __init__(self): - if 'use_single_instance_mode' in dir(input_module): - use_single_instance = input_module.use_single_instance_mode() - else: - use_single_instance = False - super(ModInputcisco_dnac_fabricdevice, self).__init__("splunk_ta_cisco_dnacenter", "cisco_dnac_fabricdevice", use_single_instance) - self.global_checkbox_fields = None - - def get_scheme(self): - """overloaded splunklib modularinput method""" - scheme = super(ModInputcisco_dnac_fabricdevice, self).get_scheme() - scheme.title = ("cisco_dnac_fabricdevice") - scheme.description = ("Go to the add-on\'s configuration UI and configure modular inputs under the Inputs menu.") - scheme.use_external_validation = True - scheme.streaming_mode_xml = True - - scheme.add_argument(smi.Argument("name", title="Name", - description="", - required_on_create=True)) - - """ - For customized inputs, hard code the arguments here to hide argument detail from users. - For other input types, arguments should be get from input_module. Defining new input types could be easier. - """ - scheme.add_argument(smi.Argument("cisco_dna_center_host", title="Cisco DNA Center Host", - description="", - required_on_create=True, - required_on_edit=False)) - scheme.add_argument(smi.Argument("cisco_dna_center_account", title="Cisco DNA Center Account", - description="", - required_on_create=True, - required_on_edit=False)) - return scheme - - def get_app_name(self): - return "Splunk-TA-cisco-dnacenter" - - def validate_input(self, definition): - """validate the input stanza""" - input_module.validate_input(self, definition) - - def collect_events(self, ew): - """write out the events""" - input_module.collect_events(self, ew) - - def get_account_fields(self): - account_fields = [] - account_fields.append("cisco_dna_center_account") - return account_fields - - def get_checkbox_fields(self): - checkbox_fields = [] - return checkbox_fields - - def get_global_checkbox_fields(self): - if self.global_checkbox_fields is None: - checkbox_name_file = os.path.join(bin_dir, 'global_checkbox_param.json') - try: - if os.path.isfile(checkbox_name_file): - with open(checkbox_name_file, 'r') as fp: - self.global_checkbox_fields = json.load(fp) - else: - self.global_checkbox_fields = [] - except Exception as e: - self.log_error('Get exception when loading global checkbox parameter names. ' + str(e)) - self.global_checkbox_fields = [] - return self.global_checkbox_fields - -if __name__ == "__main__": - exitcode = ModInputcisco_dnac_fabricdevice().run(sys.argv) - sys.exit(exitcode) diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricsite.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricsite.py deleted file mode 100755 index 8c96435..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_fabricsite.py +++ /dev/null @@ -1,95 +0,0 @@ -import splunk_ta_cisco_dnacenter_declare - -import os -import sys -import time -import datetime -import json - -import modinput_wrapper.base_modinput -from splunklib import modularinput as smi - - - -import input_module_cisco_dnac_fabricsite as input_module - -bin_dir = os.path.basename(__file__) - -''' - Do not edit this file!!! - This file is generated by Add-on builder automatically. 
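# For reference, a plain-splunklib sketch of the scheme that get_scheme()
# below assembles through the Add-on Builder wrapper. The description string
# is illustrative; the wrapper's streaming_mode_xml flag corresponds to
# Scheme.streaming_mode_xml in splunklib's modularinput API.
from splunklib import modularinput as smi

def build_scheme():
    scheme = smi.Scheme("cisco_dnac_fabricsite")
    scheme.description = "Collects fabric site data from Cisco DNA Center."
    scheme.use_external_validation = True
    scheme.streaming_mode = smi.Scheme.streaming_mode_xml
    # Arguments mirror the two custom fields declared in the generated code.
    scheme.add_argument(smi.Argument("cisco_dna_center_host",
                                     title="Cisco DNA Center Host",
                                     required_on_create=True))
    scheme.add_argument(smi.Argument("cisco_dna_center_account",
                                     title="Cisco DNA Center Account",
                                     required_on_create=True))
    return scheme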
- Add your modular input logic to file input_module_cisco_dnac_fabricsite.py -''' -class ModInputcisco_dnac_fabricsite(modinput_wrapper.base_modinput.BaseModInput): - - def __init__(self): - if 'use_single_instance_mode' in dir(input_module): - use_single_instance = input_module.use_single_instance_mode() - else: - use_single_instance = False - super(ModInputcisco_dnac_fabricsite, self).__init__("splunk_ta_cisco_dnacenter", "cisco_dnac_fabricsite", use_single_instance) - self.global_checkbox_fields = None - - def get_scheme(self): - """overloaded splunklib modularinput method""" - scheme = super(ModInputcisco_dnac_fabricsite, self).get_scheme() - scheme.title = ("cisco_dnac_fabricsite") - scheme.description = ("Go to the add-on\'s configuration UI and configure modular inputs under the Inputs menu.") - scheme.use_external_validation = True - scheme.streaming_mode_xml = True - - scheme.add_argument(smi.Argument("name", title="Name", - description="", - required_on_create=True)) - - """ - For customized inputs, hard code the arguments here to hide argument detail from users. - For other input types, arguments should be get from input_module. Defining new input types could be easier. - """ - scheme.add_argument(smi.Argument("cisco_dna_center_host", title="Cisco DNA Center Host", - description="", - required_on_create=True, - required_on_edit=False)) - scheme.add_argument(smi.Argument("cisco_dna_center_account", title="Cisco DNA Center Account", - description="", - required_on_create=True, - required_on_edit=False)) - return scheme - - def get_app_name(self): - return "Splunk-TA-cisco-dnacenter" - - def validate_input(self, definition): - """validate the input stanza""" - input_module.validate_input(self, definition) - - def collect_events(self, ew): - """write out the events""" - input_module.collect_events(self, ew) - - def get_account_fields(self): - account_fields = [] - account_fields.append("cisco_dna_center_account") - return account_fields - - def get_checkbox_fields(self): - checkbox_fields = [] - return checkbox_fields - - def get_global_checkbox_fields(self): - if self.global_checkbox_fields is None: - checkbox_name_file = os.path.join(bin_dir, 'global_checkbox_param.json') - try: - if os.path.isfile(checkbox_name_file): - with open(checkbox_name_file, 'r') as fp: - self.global_checkbox_fields = json.load(fp) - else: - self.global_checkbox_fields = [] - except Exception as e: - self.log_error('Get exception when loading global checkbox parameter names. ' + str(e)) - self.global_checkbox_fields = [] - return self.global_checkbox_fields - -if __name__ == "__main__": - exitcode = ModInputcisco_dnac_fabricsite().run(sys.argv) - sys.exit(exitcode) diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_image.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_image.py deleted file mode 100755 index 797ec39..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_image.py +++ /dev/null @@ -1,95 +0,0 @@ -import splunk_ta_cisco_dnacenter_declare - -import os -import sys -import time -import datetime -import json - -import modinput_wrapper.base_modinput -from splunklib import modularinput as smi - - - -import input_module_cisco_dnac_image as input_module - -bin_dir = os.path.basename(__file__) - -''' - Do not edit this file!!! - This file is generated by Add-on builder automatically. 
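# Note on the bin_dir assignment above: os.path.basename(__file__) yields the
# script's file name rather than its directory, so the
# os.path.join(bin_dir, 'global_checkbox_param.json') lookup used further
# below resolves relative to the current working directory. A sketch of the
# presumably intended form:
import os

bin_dir = os.path.dirname(os.path.abspath(__file__))
checkbox_name_file = os.path.join(bin_dir, "global_checkbox_param.json")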
- Add your modular input logic to file input_module_cisco_dnac_image.py -''' -class ModInputcisco_dnac_image(modinput_wrapper.base_modinput.BaseModInput): - - def __init__(self): - if 'use_single_instance_mode' in dir(input_module): - use_single_instance = input_module.use_single_instance_mode() - else: - use_single_instance = False - super(ModInputcisco_dnac_image, self).__init__("splunk_ta_cisco_dnacenter", "cisco_dnac_image", use_single_instance) - self.global_checkbox_fields = None - - def get_scheme(self): - """overloaded splunklib modularinput method""" - scheme = super(ModInputcisco_dnac_image, self).get_scheme() - scheme.title = ("cisco_dnac_image") - scheme.description = ("Go to the add-on\'s configuration UI and configure modular inputs under the Inputs menu.") - scheme.use_external_validation = True - scheme.streaming_mode_xml = True - - scheme.add_argument(smi.Argument("name", title="Name", - description="", - required_on_create=True)) - - """ - For customized inputs, hard code the arguments here to hide argument detail from users. - For other input types, arguments should be get from input_module. Defining new input types could be easier. - """ - scheme.add_argument(smi.Argument("cisco_dna_center_host", title="Cisco DNA Center Host", - description="", - required_on_create=True, - required_on_edit=False)) - scheme.add_argument(smi.Argument("cisco_dna_center_account", title="Cisco DNA Center Account", - description="", - required_on_create=True, - required_on_edit=False)) - return scheme - - def get_app_name(self): - return "Splunk-TA-cisco-dnacenter" - - def validate_input(self, definition): - """validate the input stanza""" - input_module.validate_input(self, definition) - - def collect_events(self, ew): - """write out the events""" - input_module.collect_events(self, ew) - - def get_account_fields(self): - account_fields = [] - account_fields.append("cisco_dna_center_account") - return account_fields - - def get_checkbox_fields(self): - checkbox_fields = [] - return checkbox_fields - - def get_global_checkbox_fields(self): - if self.global_checkbox_fields is None: - checkbox_name_file = os.path.join(bin_dir, 'global_checkbox_param.json') - try: - if os.path.isfile(checkbox_name_file): - with open(checkbox_name_file, 'r') as fp: - self.global_checkbox_fields = json.load(fp) - else: - self.global_checkbox_fields = [] - except Exception as e: - self.log_error('Get exception when loading global checkbox parameter names. ' + str(e)) - self.global_checkbox_fields = [] - return self.global_checkbox_fields - -if __name__ == "__main__": - exitcode = ModInputcisco_dnac_image().run(sys.argv) - sys.exit(exitcode) diff --git a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_sensordevice.py b/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_sensordevice.py deleted file mode 100755 index 393e509..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/cisco_dnac_sensordevice.py +++ /dev/null @@ -1,95 +0,0 @@ -import splunk_ta_cisco_dnacenter_declare - -import os -import sys -import time -import datetime -import json - -import modinput_wrapper.base_modinput -from splunklib import modularinput as smi - - - -import input_module_cisco_dnac_sensordevice as input_module - -bin_dir = os.path.basename(__file__) - -''' - Do not edit this file!!! - This file is generated by Add-on builder automatically. 
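# The get_global_checkbox_fields() method below performs a cached, one-time
# JSON read that degrades to an empty list on any failure. The same pattern
# in isolation (illustrative, standalone version):
import json
import os

_params_cache = None

def load_checkbox_params(path):
    global _params_cache
    if _params_cache is None:
        try:
            if os.path.isfile(path):
                with open(path) as fp:
                    _params_cache = json.load(fp)
            else:
                _params_cache = []
        except (OSError, ValueError):
            # Mirror the generated code: fall back to an empty list and move on.
            _params_cache = []
    return _params_cache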
- Add your modular input logic to file input_module_cisco_dnac_sensordevice.py -''' -class ModInputcisco_dnac_sensordevice(modinput_wrapper.base_modinput.BaseModInput): - - def __init__(self): - if 'use_single_instance_mode' in dir(input_module): - use_single_instance = input_module.use_single_instance_mode() - else: - use_single_instance = False - super(ModInputcisco_dnac_sensordevice, self).__init__("splunk_ta_cisco_dnacenter", "cisco_dnac_sensordevice", use_single_instance) - self.global_checkbox_fields = None - - def get_scheme(self): - """overloaded splunklib modularinput method""" - scheme = super(ModInputcisco_dnac_sensordevice, self).get_scheme() - scheme.title = ("cisco_dnac_sensordevice") - scheme.description = ("Go to the add-on\'s configuration UI and configure modular inputs under the Inputs menu.") - scheme.use_external_validation = True - scheme.streaming_mode_xml = True - - scheme.add_argument(smi.Argument("name", title="Name", - description="", - required_on_create=True)) - - """ - For customized inputs, hard code the arguments here to hide argument detail from users. - For other input types, arguments should be get from input_module. Defining new input types could be easier. - """ - scheme.add_argument(smi.Argument("cisco_dna_center_host", title="Cisco DNA Center Host", - description="", - required_on_create=True, - required_on_edit=False)) - scheme.add_argument(smi.Argument("cisco_dna_center_account", title="Cisco DNA Center Account", - description="", - required_on_create=True, - required_on_edit=False)) - return scheme - - def get_app_name(self): - return "Splunk-TA-cisco-dnacenter" - - def validate_input(self, definition): - """validate the input stanza""" - input_module.validate_input(self, definition) - - def collect_events(self, ew): - """write out the events""" - input_module.collect_events(self, ew) - - def get_account_fields(self): - account_fields = [] - account_fields.append("cisco_dna_center_account") - return account_fields - - def get_checkbox_fields(self): - checkbox_fields = [] - return checkbox_fields - - def get_global_checkbox_fields(self): - if self.global_checkbox_fields is None: - checkbox_name_file = os.path.join(bin_dir, 'global_checkbox_param.json') - try: - if os.path.isfile(checkbox_name_file): - with open(checkbox_name_file, 'r') as fp: - self.global_checkbox_fields = json.load(fp) - else: - self.global_checkbox_fields = [] - except Exception as e: - self.log_error('Get exception when loading global checkbox parameter names. ' + str(e)) - self.global_checkbox_fields = [] - return self.global_checkbox_fields - -if __name__ == "__main__": - exitcode = ModInputcisco_dnac_sensordevice().run(sys.argv) - sys.exit(exitcode) diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricdevice.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricdevice.py deleted file mode 100755 index 8bd6a8a..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricdevice.py +++ /dev/null @@ -1,238 +0,0 @@ - -# encoding = utf-8 - -import datetime -import json -import os -import sys -import time - -import cisco_dnac_api as api - -''' - IMPORTANT - Edit only the validate_input and collect_events functions. - Do not edit any other part in this file. - This file is generated only once when creating the modular input. -''' -''' -# For advanced users, if you want to create single instance mod input, uncomment this method. 
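# A quick numeric check of the windowing arithmetic in
# get_epoch_current_previous_times() defined further below (values are
# illustrative): with interval_seconds=900 and a wall clock of 10:07:30, the
# function subtracts now.second % 900 + 900 = 930 seconds, so the collection
# window reaches one full interval back from the last whole minute.
import datetime

now = datetime.datetime(2023, 10, 12, 10, 7, 30)
rounded = now - datetime.timedelta(seconds=now.second % 900 + 900)
assert rounded == datetime.datetime(2023, 10, 12, 9, 52, 0)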
-def use_single_instance_mode(): - return True -''' - - -def get_epoch_current_previous_times(interval_seconds): - """ - This function will return the epoch time for the {timestamp} and a previous epoch time - :return: epoch time (now) including msec, epoch time (previous) including msec - """ - # REVIEW: It is recommended that this time matches the Splunk's data input interval - now = datetime.datetime.now() - rounded = now - datetime.timedelta( - seconds=now.second % interval_seconds + interval_seconds - if interval_seconds > 0 - else now.second - ) - now = now.replace(microsecond=0) - rounded = rounded.replace(microsecond=0) - return (int(now.timestamp() * 1000), int(rounded.timestamp() * 1000)) - - -def simplify_device_health(device_health_response): - """ - This function will simplify the device health data for Splunk searches - :param device_item: device health data - :return: new device health response - """ - response = {} - if device_health_response: - response["DeviceName"] = device_health_response.get("name") - response["DeviceModel"] = device_health_response.get("model") - response["DeviceLocation"] = device_health_response.get("location") - response["DeviceIpAddress"] = device_health_response.get("ipAddress") - response["DeviceFamily"] = device_health_response.get("deviceFamily") - response["DeviceType"] = device_health_response.get("deviceType") - response["DeviceMACAddress"] = device_health_response.get("macAddress") - return response - - -def simplify_devices_health(helper, dnac, devices_health_responses): - """ - This function will simplify the device health data and add the fabric site - :param dnac: Cisco DNAC SDK api - :param devices_health_responses: devices health response - :return: new fabric devices response - """ - fabric_devices_response = [] - for device_health in devices_health_responses: - device = simplify_device_health(device_health) - device_key = device["DeviceLocation"] - fabric_site = get_simplified_fabric_site(helper, dnac, device_key) - fabric_device_new = dict(device) - if fabric_site: - fabric_device_new.update({"HasFabric": "True"}) - else: - fabric_device_new.update({"HasFabric": "False"}) - fabric_device_new.update(fabric_site) - fabric_devices_response.append(fabric_device_new) - return fabric_devices_response - - -def get_device_health(helper, dnac, input_interval): - """ - This function will retrieve the device health from a previous time to the time the function is called - :param dnac: Cisco DNAC SDK api - :return: device health response - """ - (epoch_current_time, epoch_previous_time) = get_epoch_current_previous_times( - input_interval - ) - limit = 20 - offset = 1 - devices_responses = [] - do_request_next = True - while do_request_next: - try: - health_response = dnac.devices.devices( - start_time=epoch_previous_time, - end_time=epoch_current_time, - limit=limit, - offset=offset, - ) - if health_response and health_response.response: - devices_responses.extend( - simplify_devices_health(helper, dnac, health_response.response) - ) - if len(health_response.response) < limit: - do_request_next = False - break - else: - do_request_next = False - break - except Exception: - do_request_next = False - break - offset = offset + limit - return devices_responses - - -def get_simplified_fabric_site(helper, dnac, site_name_hierarchy): - """ - This function will retrieve the fabric site and simplify them - :param dnac: Cisco DNAC SDK api - :param site_name_hierarchy: site name hierarchy - :return: fabric site response - """ - response = {} - try: - 
fabric_site = dnac.sda.get_site(site_name_hierarchy) - if fabric_site.get("status") == "success": - response["FabricSiteNameHierarchy"] = ( - fabric_site.get("siteNameHierarchy") or "" - ) - response["FabricName"] = fabric_site.get("fabricName") or "" - response["FabricType"] = fabric_site.get("fabricType") or "" - response["FabricDomainType"] = fabric_site.get("fabricDomainType") or "" - return response - except Exception as e: - import traceback - helper.log_error(traceback.format_exc()) - helper.log_error('Error getting site. ' + str(e)) - return response - - -def is_different(helper, state, item): - if not isinstance(state, dict): - helper.log_debug("is_different. The state is not a dictionary.") - return True - if not isinstance(item, dict): - helper.log_debug("is_different. The item is not a dictionary.") - return True - keys = set(state.keys()) - keys = keys.union(set(item.keys())) - properties = list(keys) - for property_ in properties: - if state.get(property_) != item.get(property_): - helper.log_debug( - "is_different. The state and item have different values for property '{0}', values are {1} and {2}.".format( - property_, - state.get(property_), - item.get(property_), - ) - ) - return True - return False - - -def validate_input(helper, definition): - """Implement your own validation logic to validate the input stanza configurations""" - # This example accesses the modular input variable - cisco_dna_center_host = definition.parameters.get("cisco_dna_center_host", None) - cisco_dna_center_account = definition.parameters.get( - "cisco_dna_center_account", None - ) - # review: check cisco_dna_center_host - if not isinstance(cisco_dna_center_host, str): - raise TypeError("URL must be string") - if not cisco_dna_center_host.startswith("https"): - raise ValueError("URL must be HTTPS") - pass - - -def collect_events(helper, ew): - opt_cisco_dna_center_host = helper.get_arg("cisco_dna_center_host") - opt_cisco_dna_center_account = helper.get_arg("cisco_dna_center_account") - - account_username = opt_cisco_dna_center_account.get("username", None) - account_password = opt_cisco_dna_center_account.get("password", None) - account_name = opt_cisco_dna_center_account.get("name", None) - current_version = "2.2.3.3" - current_verify = False - current_debug = False - - # use default input_interval - input_interval = 900 - try: - input_interval = int(helper.get_arg("interval")) - except ValueError as e: - input_interval = 900 - - dnac = api.DNACenterAPI( - username=account_username, - password=account_password, - base_url=opt_cisco_dna_center_host, - version=current_version, - verify=current_verify, - debug=current_debug, - helper=helper, - ) - - # get the devices (health) and fabric sites associated with them - devices_health = get_device_health(helper, dnac, input_interval) - - r_json = [] - for item in devices_health: - key = "{0}_{1}".format(opt_cisco_dna_center_host, item["DeviceIpAddress"]) - state = helper.get_check_point(key) - item["cisco_dnac_host"] = opt_cisco_dna_center_host - if state is None: - helper.save_check_point(key, item) - r_json.append(item) - elif is_different(helper, state, item): - helper.save_check_point(key, item) - r_json.append(item) - # helper.delete_check_point(key) - - # To create a splunk event - event = helper.new_event( - json.dumps(devices_health), - time=None, - host=None, - index=None, - source=None, - sourcetype=None, - done=True, - unbroken=True, - ) - ew.write_event(event) diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricsite.py 
b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricsite.py deleted file mode 100755 index 695b251..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_fabricsite.py +++ /dev/null @@ -1,234 +0,0 @@ - -# encoding = utf-8 - -import datetime -import json -import os -import sys -import time - -import cisco_dnac_api as api - -''' - IMPORTANT - Edit only the validate_input and collect_events functions. - Do not edit any other part in this file. - This file is generated only once when creating the modular input. -''' -''' -# For advanced users, if you want to create single instance mod input, uncomment this method. -def use_single_instance_mode(): - return True -''' - - -def simplify_site(site_resp): - """ - This function will simplify the site data for Splunk searches - :param site_resp: Cisco DNAC SDK api - :return: new site response - """ - simplified_site = {} - if site_resp: - simplified_site["SiteName"] = site_resp.get("name") - simplified_site["SiteNameHierarchy"] = site_resp.get("siteNameHierarchy") - simplified_site["SiteType"] = "area" - if site_resp.get("additionalInfo"): - if len(site_resp["additionalInfo"]) > 0: - for additional_info in site_resp["additionalInfo"]: - if isinstance(additional_info, dict) and additional_info.get( - "attributes" - ): - if isinstance( - additional_info["attributes"], dict - ) and additional_info["attributes"].get("type"): - simplified_site["SiteType"] = additional_info[ - "attributes" - ].get("type") - return simplified_site - else: - return {} - - -def simplify_sites(helper, dnac, sites_response, seen): - """ - This function will filter and simplify the sites data for Splunk searches - :param dnac: Cisco DNAC SDK api - :param sites_response: Cisco DNAC SDK api - :param seen: set to filter previous SiteNameHierarchy values - :return: new sites response - """ - fabric_sites_response = [] - for site_response in sites_response: - site = simplify_site(site_response) - site_key = site["SiteNameHierarchy"] - # if it was seen before, skip it - if site_key in seen: - continue - # if not added - seen.add(site_key) - helper.log_debug( - "Getting the fabric site data from the site name hierarchy {0}".format( - site_key - ) - ) - fabric_site = get_simplified_fabric_site(helper, dnac, site_key) - fabric_site_new = dict(site) - if fabric_site: - fabric_site_new.update({"HasFabric": "True"}) - helper.log_debug( - "Saved the fabric site data from the site name hierarchy {0}".format( - site_key - ) - ) - else: - fabric_site_new.update({"HasFabric": "False"}) - fabric_site_new.update(fabric_site) - fabric_sites_response.append(fabric_site_new) - return fabric_sites_response - - -def get_simplified_sites(helper, dnac): - """ - This function will retrieve the sites and simplify them - :param dnac: Cisco DNAC SDK api - :return: sites response - """ - limit = 20 - offset = 1 - sites_responses = [] - do_request_next = True - seen = set() - while do_request_next: - try: - sites_response = dnac.sites.get_site(limit=str(limit), offset=str(offset)) - if sites_response and sites_response.response: - sites_responses.extend( - simplify_sites(helper, dnac, sites_response.response, seen) - ) - if len(sites_response.response) < limit: - do_request_next = False - break - else: - do_request_next = False - break - except Exception: - do_request_next = False - break - offset = offset + limit - return sites_responses - - -def get_simplified_fabric_site(helper, dnac, site_name_hierarchy): - """ - This function will retrieve the fabric site and simplify them - :param 
dnac: Cisco DNAC SDK api - :param site_name_hierarchy: site name hierarchy - :return: fabric site response - """ - response = {} - try: - fabric_site = dnac.sda.get_site(site_name_hierarchy) - if fabric_site.get("status") == "success": - response["FabricSiteNameHierarchy"] = ( - fabric_site.get("siteNameHierarchy") or "" - ) - response["FabricName"] = fabric_site.get("fabricName") or "" - response["FabricType"] = fabric_site.get("fabricType") or "" - response["FabricDomainType"] = fabric_site.get("fabricDomainType") or "" - return response - except Exception as e: - import traceback - helper.log_error(traceback.format_exc()) - helper.log_error('Error getting site. ' + str(e)) - return response - - -def is_different(helper, state, item): - if not isinstance(state, dict): - helper.log_debug("is_different. The state is not a dictionary.") - return True - if not isinstance(item, dict): - helper.log_debug("is_different. The item is not a dictionary.") - return True - keys = set(state.keys()) - keys = keys.union(set(item.keys())) - properties = list(keys) - for property_ in properties: - if state.get(property_) != item.get(property_): - helper.log_debug( - "is_different. The state and item have different values for property '{0}', values are {1} and {2}.".format( - property_, - state.get(property_), - item.get(property_), - ) - ) - return True - return False - - -def validate_input(helper, definition): - """Implement your own validation logic to validate the input stanza configurations""" - # This example accesses the modular input variable - cisco_dna_center_host = definition.parameters.get("cisco_dna_center_host", None) - cisco_dna_center_account = definition.parameters.get( - "cisco_dna_center_account", None - ) - # review: check cisco_dna_center_host - if not isinstance(cisco_dna_center_host, str): - raise TypeError("URL must be string") - if not cisco_dna_center_host.startswith("https"): - raise ValueError("URL must be HTTPS") - pass - - -def collect_events(helper, ew): - opt_cisco_dna_center_host = helper.get_arg("cisco_dna_center_host") - opt_cisco_dna_center_account = helper.get_arg("cisco_dna_center_account") - - account_username = opt_cisco_dna_center_account.get("username", None) - account_password = opt_cisco_dna_center_account.get("password", None) - account_name = opt_cisco_dna_center_account.get("name", None) - current_version = "2.2.3.3" - current_verify = False - current_debug = False - - dnac = api.DNACenterAPI( - username=account_username, - password=account_password, - base_url=opt_cisco_dna_center_host, - version=current_version, - verify=current_verify, - debug=current_debug, - helper=helper, - ) - - r_json = [] - - # get the sites and fabric sites associated with them - simplified_sites = get_simplified_sites(helper, dnac) - - for item in simplified_sites: - key = "{0}_{1}".format(opt_cisco_dna_center_host, item["SiteNameHierarchy"]) - state = helper.get_check_point(key) - item["cisco_dnac_host"] = opt_cisco_dna_center_host - if state is None: - helper.save_check_point(key, item) - r_json.append(item) - elif is_different(helper, state, item): - helper.save_check_point(key, item) - r_json.append(item) - # helper.delete_check_point(key) - - # To create a splunk event - event = helper.new_event( - json.dumps(r_json), - time=None, - host=None, - index=None, - source=None, - sourcetype=None, - done=True, - unbroken=True, - ) - ew.write_event(event) diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_image.py 
b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_image.py deleted file mode 100755 index 7889ea8..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_image.py +++ /dev/null @@ -1,523 +0,0 @@ - -# encoding = utf-8 - -import datetime -import json -import os -import re -import sys -import time - -import cisco_dnac_api as api - -''' - IMPORTANT - Edit only the validate_input and collect_events functions. - Do not edit any other part in this file. - This file is generated only once when creating the modular input. -''' -''' -# For advanced users, if you want to create single instance mod input, uncomment this method. -def use_single_instance_mode(): - return True -''' - - -def get_important_image_details(details): - """ - This function will simplify the image details for Splunk searches - :param details: image details - :return: simplified image response - """ - response = {} - if details: - response["DeviceImageUpgradeStatus"] = ( - details.get("deviceImageUpgradeStatus") or "" - ) - response["UpgradeStatus"] = details.get("upgradeStatus") or "" - response["DeviceInstalledInfoImageUuid"] = "" - response["DeviceInstalledInfoName"] = "" - response["DeviceInstalledInfoType"] = "" - response["DeviceInstalledInfoVersion"] = "" - response["DeviceInstalledInfoDisplayVersion"] = "" - response["DeviceInstalledInfoFamily"] = "" - response["DeviceInstalledInfoGolden"] = "False" - response["DeviceInstalledInfoSubpackageType"] = "" - response["HasTargetImageInfo"] = "False" - response["ReadinessCheckStatusStatus"] = "" - response["ReadinessCheckStatusIsReadyForUpgrade"] = "False" - if details.get("deviceInstalledInfo") and isinstance( - details["deviceInstalledInfo"], list - ): - if len(details["deviceInstalledInfo"]) > 0: - response["DeviceInstalledInfoImageUuid"] = ( - details["deviceInstalledInfo"][0].get("imageUuid") or "" - ) - response["DeviceInstalledInfoName"] = ( - details["deviceInstalledInfo"][0].get("name") or "" - ) - response["DeviceInstalledInfoType"] = ( - details["deviceInstalledInfo"][0].get("type") or "" - ) - response["DeviceInstalledInfoVersion"] = ( - details["deviceInstalledInfo"][0].get("version") or "" - ) - response["DeviceInstalledInfoDisplayVersion"] = ( - details["deviceInstalledInfo"][0].get("displayVersion") or "" - ) - response["DeviceInstalledInfoFamily"] = ( - details["deviceInstalledInfo"][0].get("family") or "" - ) - response["DeviceInstalledInfoGolden"] = ( - str(details["deviceInstalledInfo"][0].get("golden")) or "False" - ) - response["DeviceInstalledInfoSubpackageType"] = ( - details["deviceInstalledInfo"][0].get("subpackageType") or "" - ) - if details.get("targetImageInfo") and isinstance( - details["targetImageInfo"], list - ): - response["HasTargetImageInfo"] = "True" - response["TargetImageInfo"] = details["targetImageInfo"] - if details.get("readinessCheckStatus") and isinstance( - details["readinessCheckStatus"], dict - ): - response["ReadinessCheckStatusStatus"] = details[ - "readinessCheckStatus" - ].get("status") - response["ReadinessCheckStatusIsReadyForUpgrade"] = str( - details["readinessCheckStatus"].get("isReadyForUpgrade") - ) - return response - - -def get_device_image_stack_details(dnac, device_id): - """ - This function will retrieve the specific device image stack details - :param dnac: Cisco DNAC SDK api - :param device_id: device id - :return: device image stack response - """ - try: - stack_details = dnac.devices.get_stack_details_for_device(device_id) - if stack_details and stack_details.response: - return 
stack_details.response - return None - except: - return None - - -def get_devices(dnac): - """ - This function will retrieve all the devices - :param dnac: Cisco DNAC SDK api - :return: devices response - """ - limit = 20 - offset = 1 - devices_responses = [] - do_request_next = True - while do_request_next: - try: - device_response = dnac.devices.get_device_list(limit=limit, offset=offset) - if device_response and device_response.response: - devices_responses.extend(device_response.response) - if len(device_response.response) < limit: - do_request_next = False - break - else: - do_request_next = False - break - except Exception: - do_request_next = False - break - offset = offset + limit - return devices_responses - - -def get_important_device_values(device_item): - """ - This function will simplify the device information for Splunk searches - :param device_item: device information - :return: simplified device response - """ - response = {} - response["DeviceID"] = device_item.get("id") or "" - response["DeviceName"] = device_item.get("hostname") or "N/A" - response["DeviceIpAddress"] = ( - device_item.get("managementIpAddress") or device_item.get("ipAddress") or "" - ) - response["DeviceFamily"] = device_item.get("family") or "" - response["DeviceMACAddress"] = ( - device_item.get("macAddress") or device_item.get("apEthernetMacAddress") or "" - ) - response["DeviceRole"] = device_item.get("role") or "UNKNOWN" - response["DeviceImageVersion"] = device_item.get("softwareVersion") or "" - response["DeviceUptime"] = device_item.get("upTime") or "" - if device_item.get("uptimeSeconds") is not None: - response["DeviceUptimeSeconds"] = device_item.get("uptimeSeconds") - else: - response["DeviceUptimeSeconds"] = 0 - response["DeviceLastUpdated"] = device_item.get("lastUpdated") or "" - if device_item.get("lastUpdateTime") is not None: - response["DeviceLastUpdateTime"] = device_item.get("lastUpdateTime") - else: - response["DeviceLastUpdateTime"] = 0 - response["DeviceSerialNumber"] = device_item.get("serialNumber") or "" - response["DeviceSeries"] = device_item.get("series") or "" - response["DevicePlatform"] = device_item.get("platformId") or "" - response["DeviceSupportType"] = device_item.get("deviceSupportLevel") or "" - return response - - -def get_important_stack_details(details): - """ - This function will simplify the image stack details for Splunk searches - :param details: image stack details - :return: simplified image stack response - """ - response = {"HasStack": "False"} - response["SwitchInfoAmount"] = 0 - response["StackSwitchInfoSoftwareImage"] = "N/A" - response["StackSwitchInfoRole"] = "" - response["StackSwitchInfoState"] = "" - response["StackSwitchInfoIsReady"] = "False" - response["SwitchInfo"] = [] - response["StackPortInfoAmount"] = 0 - response["StackPortInfo"] = [] - response["StackSvlSwitchInfoAmount"] = 0 - response["StackSvlSwitchInfo"] = [] - if details: - if details.get("stackSwitchInfo"): - response["HasStack"] = "True" - response["SwitchInfoAmount"] = ( - len(details["stackSwitchInfo"]) - if isinstance(details["stackSwitchInfo"], list) - else 0 - ) - response["SwitchInfo"] = details["stackSwitchInfo"] - if len(details.get("stackSwitchInfo")) > 0: - response["StackSwitchInfoSoftwareImage"] = ( - details["stackSwitchInfo"][0].get("softwareImage") or "N/A" - ) - response["StackSwitchInfoRole"] = ( - details["stackSwitchInfo"][0].get("role") or "" - ) - response["StackSwitchInfoState"] = ( - details["stackSwitchInfo"][0].get("state") or "" - ) - 
response["StackSwitchInfoIsReady"] = str( - details["stackSwitchInfo"][0].get("state") == "READY" - ) - if details.get("stackPortInfo"): - response["HasStack"] = "True" - response["StackPortInfoAmount"] = ( - len(details["stackPortInfo"]) - if isinstance(details["stackPortInfo"], list) - else 0 - ) - response["StackPortInfo"] = details["stackPortInfo"] - if details.get("svlSwitchInfo"): - response["HasStack"] = "True" - response["StackSvlSwitchInfoAmount"] = ( - len(details["svlSwitchInfo"]) - if isinstance(details["svlSwitchInfo"], list) - else 0 - ) - response["StackSvlSwitchInfo"] = details["svlSwitchInfo"] - return response - - -def filter_data_as_images_1(helper, dnac, images_items, devices_items): - """ - This function will filter data to get the overall image health data. - :param dnac: Cisco DNAC SDK api - :param images_items: images - :param devices_items: devices - :return: image health summary response - """ - summary_response = [] - image_dict_summary = {} - image_dict = {} - - for image_item in images_items: - image_key = image_item["ImageSimpleName"].lower() - if image_dict_summary.get(image_key) is None: - image_dict_summary[image_key] = {} - if image_dict.get(image_key) is None: - image_dict[image_key] = {} - - image_key = image_item["ImageSimpleName"].lower() - image_dict_summary[image_key] = dict(image_item) - image_dict_summary[image_key]["ImageDevicesAmount"] = 0 - image_dict_summary[image_key]["ImageSummary"] = "True" - image_dict[image_key] = dict(image_item) - image_dict[image_key]["ImageDevicesAmount"] = 0 - image_dict[image_key]["ImageSummary"] = "False" - - if image_dict_summary[image_key].get("ImageVersionsAmount") is None: - image_dict_summary[image_key]["ImageVersionsAmount"] = 0 - image_dict_summary[image_key]["ImageImageUuid"] = [] - if image_dict[image_key].get("ImageVersionsAmount") is None: - image_dict[image_key]["ImageVersionsAmount"] = 0 - image_dict[image_key]["ImageImageUuid"] = [] - image_dict_summary[image_key]["ImageVersionsAmount"] += 1 - image_dict[image_key]["ImageVersionsAmount"] += 1 - image_dict_summary[image_key]["ImageImageUuid"].append(image_item.get("ImageImageUuid")) - image_dict[image_key]["ImageImageUuid"].append(image_item.get("ImageImageUuid")) - - for device_item in devices_items: - helper.log_debug( - "Getting the stack details from the device id {0}".format( - device_item.get("id") - ) - ) - stack_detail = get_important_stack_details( - get_device_image_stack_details(dnac, device_item.get("id")) - ) - stack_detail_key = "StackSwitchInfoSoftwareImage" - - device_detail = get_important_device_values(device_item) - - # Note. skip unknown case - if stack_detail is None or stack_detail.get(stack_detail_key) is None: - continue - - image_key_2 = stack_detail[stack_detail_key].lower() - image_dict_summary_keys = list(image_dict_summary.keys()) - for i in image_dict_summary_keys: - if i == image_key_2 or image_key_2.startswith(i): - image_key_2 = i - - # Setting image_dict_summary - if image_dict_summary.get(image_key_2) is None: - image_dict_summary[image_key_2] = { - "ImageDevicesAmount": 0, "ImageSummary": "True" - } - # N/A from stack detail. 
- if stack_detail[stack_detail_key] == "N/A": - image_dict_summary[image_key_2]["ImageDevicesAmount"] += 1 - image_dict_summary[image_key_2]["ImageName"] = "N/A" - image_dict_summary[image_key_2]["ImageImageName"] = "N/A" - image_dict_summary[image_key_2]["ImageImageUuid"] = "" - image_dict_summary[image_key_2]["ImageFamily"] = "" - image_dict_summary[image_key_2]["ImageVersion"] = "" - image_dict_summary[image_key_2]["ImageDisplayVersion"] = "" - image_dict_summary[image_key_2]["ImageSimpleName"] = image_key_2 - # Setting image_dict - image_dict[image_key_2] = dict(image_dict_summary[image_key_2]) - image_dict[image_key_2]["ImageSummary"] = "False" - image_dict[image_key_2]["ImageDevicesAmount"] = 1 - image_dict[image_key_2].update(device_detail) - image_dict[image_key_2].update(stack_detail) - # Adding current image_dict to summary_response - summary_response.append(dict(image_dict[image_key_2])) - continue - - if image_dict_summary.get(image_key_2): - image_dict_summary[image_key_2]["ImageDevicesAmount"] += 1 - image_dict[image_key_2]["ImageDevicesAmount"] += 1 - image_dict[image_key_2].update(device_detail) - image_dict[image_key_2].update(stack_detail) - summary_response.append(dict(image_dict[image_key_2])) - summary_response.extend(list(image_dict_summary.values())) - return summary_response - - -def filter_data_as_images(helper, dnac, images_items, devices_items): - """ - This function will call another function that filters data to get the overall image health data. - :param dnac: Cisco DNAC SDK api - :param images_items: images - :param devices_items: devices - :return: image health summary response - """ - return filter_data_as_images_1(helper, dnac, images_items, devices_items) - - -def simplify_name(name, display_version): - """ - This function trims from the image name, removing the display_version and other information, to use the new image name as an index - :param name: image name - :param display_version: image display version - :return: simplified image name - """ - if name: - return re.sub(r'\.\d{2}.*', '', name) - else: - return "" - - -def get_important_image_info(image_response): - response = {} - response["ImageImageUuid"] = image_response.get("imageUuid") - response["ImageName"] = image_response.get("name") - response["ImageImageName"] = image_response.get("imageName") - response["ImageFamily"] = image_response.get("family") - response["ImageVersion"] = image_response.get("version") - response["ImageDisplayVersion"] = image_response.get( - "displayVersion" - ) - response["ImageVendor"] = image_response.get("vendor") - response["ImageIntegrityStatus"] = image_response.get("imageIntegrityStatus") - response["ImageIsGolden"] = str(image_response.get("isTaggedGolden")) - response["ImageSimpleName"] = simplify_name( - image_response.get("name"), image_response.get("displayVersion") - ) - return response - - -def get_images(dnac): - """ - This function will retrieve the images found in SWIM - :param dnac: Cisco DNAC SDK api - :return: images response - """ - responses = [] - images_response_fn = None - images_response = None - # Select function - if hasattr(dnac, "software_image_management_swim"): - images_response_fn = ( - dnac.software_image_management_swim.get_software_image_details - ) - elif hasattr(dnac, "swim"): - images_response_fn = dnac.swim.get_software_image_details - # If not found return fast (fail silently) - if images_response_fn is None: - return responses - - limit = 20 - offset = 1 - do_request_next = True - while do_request_next: - try: - 
images_response = images_response_fn(limit=limit, offset=offset) - if images_response and images_response.response: - for image_response in images_response.response: - responses.append(get_important_image_info(image_response)) - if len(images_response.response) < limit: - do_request_next = False - break - else: - do_request_next = False - break - except Exception: - do_request_next = False - break - offset = offset + limit - return responses - - -def is_different(helper, state, item): - if not isinstance(state, dict): - helper.log_debug("is_different. The state is not a dictionary.") - return True - if not isinstance(item, dict): - helper.log_debug("is_different. The item is not a dictionary.") - return True - keys = set(state.keys()) - keys = keys.union(set(item.keys())) - properties = list(keys) - for property_ in properties: - if state.get(property_) != item.get(property_): - helper.log_debug( - "is_different. The state and item have different values for property '{0}', values are {1} and {2}.".format( - property_, - state.get(property_), - item.get(property_), - ) - ) - return True - return False - - -def validate_input(helper, definition): - """Implement your own validation logic to validate the input stanza configurations""" - # This example accesses the modular input variable - cisco_dna_center_host = definition.parameters.get("cisco_dna_center_host", None) - cisco_dna_center_account = definition.parameters.get( - "cisco_dna_center_account", None - ) - # review: check cisco_dna_center_host - if not isinstance(cisco_dna_center_host, str): - raise TypeError("URL must be string") - if not cisco_dna_center_host.startswith("https"): - raise ValueError("URL must be HTTPS") - pass - - -def collect_events(helper, ew): - opt_cisco_dna_center_host = helper.get_arg("cisco_dna_center_host") - opt_cisco_dna_center_account = helper.get_arg("cisco_dna_center_account") - - account_username = opt_cisco_dna_center_account.get("username", None) - account_password = opt_cisco_dna_center_account.get("password", None) - account_name = opt_cisco_dna_center_account.get("name", None) - current_version = "2.2.3.3" - current_verify = False - current_debug = False - - # use default input_interval - input_interval = 900 - try: - input_interval = int(helper.get_arg("interval")) - except ValueError as e: - input_interval = 900 - - dnac = api.DNACenterAPI( - username=account_username, - password=account_password, - base_url=opt_cisco_dna_center_host, - version=current_version, - verify=current_verify, - debug=current_debug, - helper=helper, - ) - - r_json = [] - - # get the devices - devices_items = get_devices(dnac) - # get the images - images_items = get_images(dnac) - # merge and simplify gathered information - overall_result = filter_data_as_images(helper, dnac, images_items, devices_items) - - for item in overall_result: - item["cisco_dnac_host"] = opt_cisco_dna_center_host - if item["ImageSummary"] == "True": - key = "{0}_{1}".format(opt_cisco_dna_center_host, item.get("ImageName")) - state = helper.get_check_point(key) - if state is None: - helper.save_check_point(key, item) - r_json.append(item) - elif is_different(helper, state, item): - helper.save_check_point(key, item) - r_json.append(item) - # helper.delete_check_point(key) - else: - key = "{0}_{1}_{2}".format( - opt_cisco_dna_center_host, item.get("ImageName"), item.get("DeviceID") - ) - state = helper.get_check_point(key) - if state is None: - helper.save_check_point(key, item) - r_json.append(item) - elif is_different(helper, state, item): - 
helper.save_check_point(key, item) - r_json.append(item) - # helper.delete_check_point(key) - - # To create a splunk event - event = helper.new_event( - json.dumps(r_json), - time=None, - host=None, - index=None, - source=None, - sourcetype=None, - done=True, - unbroken=True, - ) - ew.write_event(event) diff --git a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_sensordevice.py b/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_sensordevice.py deleted file mode 100755 index 70f86ad..0000000 --- a/Splunk-TA-cisco-dnacenter/bin/input_module_cisco_dnac_sensordevice.py +++ /dev/null @@ -1,135 +0,0 @@ - -# encoding = utf-8 - -import os -import json -import sys -import time -import datetime - -import cisco_dnac_api as api - -''' - IMPORTANT - Edit only the validate_input and collect_events functions. - Do not edit any other part in this file. - This file is generated only once when creating the modular input. -''' -''' -# For advanced users, if you want to create single instance mod input, uncomment this method. -def use_single_instance_mode(): - return True -''' - - -def get_sensors(dnac): - """ - This function will retrieve the sensor details - :param dnac: Cisco DNAC SDK api - :return: sensor details - """ - responses = [] - sensor_response = dnac.sensors.sensors() - if sensor_response and sensor_response.response: - for sensor_item in sensor_response.response: - sensor_item_new = dict(sensor_item) - if sensor_item_new.get("sshConfig"): - sensor_item_new["sshState"] = sensor_item_new["sshConfig"].get( - "sshState" - ) - sensor_item_new["sshUserName"] = sensor_item_new["sshConfig"].get( - "sshUserName" - ) - sensor_item_new["sshPassword"] = sensor_item_new["sshConfig"].get( - "sshPassword" - ) - sensor_item_new["enablePassword"] = sensor_item_new["sshConfig"].get( - "enablePassword" - ) - sensor_item_new.pop("sshConfig", None) - if sensor_item_new.get("isLEDEnabled") is not None: - sensor_item_new["isLEDEnabled"] = str(sensor_item_new["isLEDEnabled"]) - responses.append(sensor_item_new) - return responses - - -def is_different(helper, state, item): - if not isinstance(state, dict): - helper.log_debug("is_different. The state is not a dictionary.") - return True - if not isinstance(item, dict): - helper.log_debug("is_different. The item is not a dictionary.") - return True - keys = set(state.keys()) - keys = keys.union(set(item.keys())) - properties = list(keys) - for property_ in properties: - if state.get(property_) != item.get(property_): - helper.log_debug( - "is_different. 
The state and item have different values for property '{0}', values are {1} and {2}.".format( - property_, - state.get(property_), - item.get(property_), - ) - ) - return True - return False - - -def validate_input(helper, definition): - """Implement your own validation logic to validate the input stanza configurations""" - # This example accesses the modular input variable - cisco_dna_center_host = definition.parameters.get("cisco_dna_center_host", None) - cisco_dna_center_account = definition.parameters.get( - "cisco_dna_center_account", None - ) - # review: check cisco_dna_center_host - if not isinstance(cisco_dna_center_host, str): - raise TypeError("URL must be string") - if not cisco_dna_center_host.startswith("https"): - raise ValueError("URL must be HTTPS") - pass - - -def collect_events(helper, ew): - opt_cisco_dna_center_host = helper.get_arg("cisco_dna_center_host") - opt_cisco_dna_center_account = helper.get_arg("cisco_dna_center_account") - - account_username = opt_cisco_dna_center_account.get("username", None) - account_password = opt_cisco_dna_center_account.get("password", None) - account_name = opt_cisco_dna_center_account.get("name", None) - current_version = "2.2.3.3" - current_verify = False - current_debug = False - - dnac = api.DNACenterAPI( - username=account_username, - password=account_password, - base_url=opt_cisco_dna_center_host, - version=current_version, - verify=current_verify, - debug=current_debug, - helper=helper, - ) - - r_json = [] - - # get the sensor details - overall_sensors = get_sensors(dnac) - - for item in overall_sensors: - item["cisco_dnac_host"] = opt_cisco_dna_center_host - r_json.append(item) - - # To create a splunk event - event = helper.new_event( - json.dumps(r_json), - time=None, - host=None, - index=None, - source=None, - sourcetype=None, - done=True, - unbroken=True, - ) - ew.write_event(event) From 1364e7a5f115a77980311ce5565e1dcdcac9a6c6 Mon Sep 17 00:00:00 2001 From: William Astorga Date: Fri, 13 Oct 2023 00:23:20 -0600 Subject: [PATCH 4/6] fix: update builder_version from 4.1.0 to 4.1.3, part 3 --- .../default/data/ui/nav/default.xml | 2 +- Splunk-TA-cisco-dnacenter/default/inputs.conf | 21 ++++++---- Splunk-TA-cisco-dnacenter/default/props.conf | 38 +++---------------- .../default/restmap.conf | 16 ++++---- Splunk-TA-cisco-dnacenter/default/web.conf | 16 ++++---- 5 files changed, 36 insertions(+), 57 deletions(-) diff --git a/Splunk-TA-cisco-dnacenter/default/data/ui/nav/default.xml b/Splunk-TA-cisco-dnacenter/default/data/ui/nav/default.xml index e9c2d0a..000fbb4 100644 --- a/Splunk-TA-cisco-dnacenter/default/data/ui/nav/default.xml +++ b/Splunk-TA-cisco-dnacenter/default/data/ui/nav/default.xml @@ -1,4 +1,4 @@ -